Author: Suresh Venkatesan
Trained_Models: All the models and history files used in the results summary of this notebook can be downloaded from the "Training_Outputs" sub-folder available at this link.
### Use this for Google Colab
# Mount Google Drive so the dataset, logs and trained models can be read/written from Drive.
from google.colab import drive
drive.mount('/content/drive')
Mounted at /content/drive
%%capture
# Install Keras Tuner and fetch the standalone LR-finder helper script.
# %%capture suppresses the noisy pip/wget output in the notebook.
!pip install -q -U keras-tuner
!wget https://raw.githubusercontent.com/surmenok/keras_lr_finder/master/keras_lr_finder/lr_finder.py .
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import time
import datetime
import os
import shutil
import pytz
import math
import cv2
import pprint
%matplotlib inline
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers, regularizers, models, Model
from tensorflow.keras import optimizers, losses, metrics
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Input, Flatten, Dense, Activation, Conv2D, MaxPooling2D
from tensorflow.keras.layers import BatchNormalization, Dropout
from tensorflow.keras.datasets import mnist
from tensorflow.keras.optimizers.schedules import InverseTimeDecay
from tensorflow.keras.callbacks import TensorBoard, ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
from tensorflow.keras.utils import to_categorical
from tensorflow.keras.utils import Sequence
from tensorflow.keras.preprocessing.image import apply_affine_transform
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications import vgg16
from tensorflow.keras.applications import resnet50
from tensorflow.keras.applications import mobilenet
from tensorflow.keras.applications import inception_v3
from tensorflow.keras.applications import densenet
from tensorflow.keras.applications import nasnet
from tensorflow.keras.applications import EfficientNetB0, EfficientNetB1, EfficientNetB2, EfficientNetB3
from tensorflow.keras.applications import EfficientNetB4, EfficientNetB5, EfficientNetB6, EfficientNetB7
from tensorflow.keras.applications import efficientnet
from tensorflow.keras.applications import inception_resnet_v2
from tensorflow.keras.applications import InceptionResNetV2
import kerastuner as kt
from kerastuner import HyperModel
from tqdm import tqdm
from sklearn.model_selection import train_test_split
import imgaug as ia
from imgaug import augmenters as iaa
from imgaug import parameters as iap
from imgaug.augmentables.bbs import BoundingBox, BoundingBoxesOnImage
from imgaug.augmentables.batches import Batch
# Ignore the warnings
import warnings
warnings.filterwarnings("ignore")
%load_ext tensorboard
# Define base path for TensorBoard Logs directory
tb_logs_base_path = "/content/drive/MyDrive/AI_ML_Folder/Colab_Directory/Model_Outputs/Stanford_Car_Dataset/TB_Logs/"
os.makedirs(tb_logs_base_path, exist_ok = True) # Don't raise any exception if directory exists
# Define base path for Keras Tuner Logs directory
kt_logs_base_path = "/content/drive/MyDrive/AI_ML_Folder/Colab_Directory/Model_Outputs/Stanford_Car_Dataset/KT_Logs/"
os.makedirs(kt_logs_base_path, exist_ok = True) # Don't raise any exception if directory exists
# Define base path for storing all outputs related to model / training
out_base_path = "/content/drive/MyDrive/AI_ML_Folder/Colab_Directory/Model_Outputs/Stanford_Car_Dataset/Training_Outputs/"
os.makedirs(out_base_path, exist_ok = True) # Don't raise any exception if directory exists
# Define base path of dataset
dataset_path = "/content/drive/MyDrive/AI_ML_Folder/Colab_Directory/Datasets/Image_Datasets/Stanford_Car_Dataset/"
# Copy dataset zip from Google Drive to local Colab storage (local disk is much faster to read during training)
img_zip_file_name = "Consolidated_Dataset.zip"
img_zip_file_path = os.path.join(dataset_path, img_zip_file_name)
# Get start time of run and display it (Asia/Kolkata timezone)
start_time = datetime.datetime.now(pytz.timezone('Asia/Kolkata'))
print("Started dataset copy at %s..." %(start_time.strftime("%H:%M:%S")), end = ' ')
!cp {img_zip_file_path} .
# Unzip dataset file quietly (-q) into the current working directory
print("Unzipping dataset...")
!unzip -q Consolidated_Dataset.zip
# Delete zip file to free local disk space
!rm Consolidated_Dataset.zip
# Get end time of run and display elapsed time in minutes
end_time = datetime.datetime.now(pytz.timezone('Asia/Kolkata'))
elap_time = ((end_time - start_time).total_seconds())/60
print("Completed at %s. Elapsed time = %0.2f minutes." %(end_time.strftime("%H:%M:%S"), elap_time))
Started dataset copy at 07:25:31... Unzipping dataset... Completed at 07:26:35. Elapsed time = 1.06 minutes.
# Load train annotation file in a DataFrame.
# Columns: filename, BBOX coords (xmin/ymin/xmax/ymax), numeric label,
# image dims (img_h/img_w) and the class name string.
ann_train_csv_path = './annot_train_cons.csv'
train_df = pd.read_csv(ann_train_csv_path)
display(train_df.head())
| filename | xmin | ymin | xmax | ymax | label | img_h | img_w | class | |
|---|---|---|---|---|---|---|---|---|---|
| 0 | 00001.jpg | 39 | 116 | 569 | 375 | 14 | 400.0 | 600.0 | Audi TTS Coupe 2012 |
| 1 | 00002.jpg | 36 | 116 | 868 | 587 | 3 | 675.0 | 900.0 | Acura TL Sedan 2012 |
| 2 | 00003.jpg | 85 | 109 | 601 | 381 | 91 | 480.0 | 640.0 | Dodge Dakota Club Cab 2007 |
| 3 | 00004.jpg | 621 | 393 | 1484 | 1096 | 134 | 1386.0 | 2100.0 | Hyundai Sonata Hybrid Sedan 2012 |
| 4 | 00005.jpg | 14 | 36 | 133 | 99 | 106 | 108.0 | 144.0 | Ford F-450 Super Duty Crew Cab 2012 |
# Load test annotation file in a DataFrame (same schema as the train annotations).
# This full set is split below into validation and test subsets.
ann_test_csv_path = './annot_test_cons.csv'
test_orig_df = pd.read_csv(ann_test_csv_path)
display(test_orig_df.head())
| filename | xmin | ymin | xmax | ymax | label | img_h | img_w | class | |
|---|---|---|---|---|---|---|---|---|---|
| 0 | 00001.jpg | 30 | 52 | 246 | 147 | 181 | 182.0 | 276.0 | Suzuki Aerio Sedan 2007 |
| 1 | 00002.jpg | 100 | 19 | 576 | 203 | 103 | 360.0 | 640.0 | Ferrari 458 Italia Convertible 2012 |
| 2 | 00003.jpg | 51 | 105 | 968 | 659 | 145 | 741.0 | 1024.0 | Jeep Patriot SUV 2012 |
| 3 | 00004.jpg | 67 | 84 | 581 | 407 | 187 | 480.0 | 640.0 | Toyota Camry Sedan 2012 |
| 4 | 00005.jpg | 140 | 151 | 593 | 339 | 185 | 373.0 | 600.0 | Tesla Model S Sedan 2012 |
# Split the original test set into a validation set and a (reduced) test set,
# taking the first `num_val_img_per_class` rows of every class for validation.
num_val_img_per_class = 6 # Number of images per class to use in validation set
class_list = test_orig_df['class'].unique()

val_parts = []   # Per-class validation slices
test_parts = []  # Per-class test slices
for class_val in class_list:
    temp_test_df = test_orig_df[test_orig_df['class'] == class_val]
    val_parts.append(temp_test_df.iloc[:num_val_img_per_class])
    test_parts.append(temp_test_df.iloc[num_val_img_per_class:])

# Concatenate once at the end: DataFrame.append is deprecated (removed in pandas 2.x)
# and appending inside the loop is quadratic in the number of classes.
val_df = pd.concat(val_parts)
test_df = pd.concat(test_parts)
def print_set_info(name, df):
    '''
    Print summary statistics for one dataset split: image count, height/width
    extremes and aspect-ratio extremes. Output format is identical to the
    previous three copy-pasted blocks.
    Arguments:
        name: Split name used in the heading (e.g. 'Training_Set')
        df: DataFrame with 'img_h' and 'img_w' columns
    '''
    header = f"{name}_Information:"
    print(header)
    print("-" * len(header))  # Underline matches the heading length
    print(f"Number of images is {df.shape[0]}")
    print(f"Smallest height of images is {df['img_h'].min()}")
    print(f"Largest height of images is {df['img_h'].max()}")
    print(f"Smallest width of images is {df['img_w'].min()}")
    print(f"Largest width of images is {df['img_w'].max()}")
    aspect_ratio = df['img_w'] / df['img_h']
    print("Lowest aspect ratio of images is %0.2f" % aspect_ratio.min())
    print("Highest aspect ratio of images is %0.2f" % aspect_ratio.max())
    print()

# One helper call per split instead of three copy-pasted print blocks.
print_set_info("Training_Set", train_df)
print_set_info("Validation_Set", val_df)
print_set_info("Test_Set", test_df)
Training_Set_Information: ------------------------- Number of images is 8144 Smallest height of images is 57.0 Largest height of images is 3744.0 Smallest width of images is 78.0 Largest width of images is 5616.0 Lowest aspect ratio of images is 0.59 Highest aspect ratio of images is 3.89 Validation_Set_Information: --------------------------- Number of images is 1176 Smallest height of images is 53.0 Largest height of images is 3240.0 Smallest width of images is 85.0 Largest width of images is 4320.0 Lowest aspect ratio of images is 0.67 Highest aspect ratio of images is 3.00 Test_Set_Information: --------------------- Number of images is 6865 Smallest height of images is 41.0 Largest height of images is 5400.0 Smallest width of images is 78.0 Largest width of images is 7800.0 Lowest aspect ratio of images is 0.60 Highest aspect ratio of images is 3.61
def plot_class_dist(name, df):
    '''
    Plot the class distribution (count per class) for one dataset split.
    Only every 10th x-tick label is shown so the axis stays readable with
    this many classes.
    Arguments:
        name: Split name used in the plot title (e.g. 'Training_Set')
        df: DataFrame with a 'class' column
    '''
    plt.figure(figsize = (15, 8))
    plot_ = sns.countplot(x = df['class'])
    # Note: fixes the 'Distrubution' typo that appeared in the previous titles.
    plt.title('Class Distribution of ' + name, fontsize = 20)
    plt.xlabel('Class Values', fontsize = 20)
    plt.ylabel('Class Count', fontsize = 20)
    plt.xticks(rotation = 90, fontsize = 12)
    for ind, label in enumerate(plot_.get_xticklabels()):
        # Show only every 10th class label; hide the rest
        label.set_visible((ind % 10) == 0)
    plt.show()

# Class distribution of each split (was three copy-pasted plotting blocks).
plot_class_dist('Training_Set', train_df)
plot_class_dist('Validation_Set', val_df)
plot_class_dist('Test_Set', test_df)
# Collate the per-split class frequency counts into one dataframe,
# indexed by the class name.
train_class_df = train_df['class'].value_counts().to_frame(name = 'Training_Set')
val_class_df = val_df['class'].value_counts().to_frame(name = 'Validation_Set')
test_class_df = test_df['class'].value_counts().to_frame(name = 'Test_Set')
# DataFrame.join performs a left join on the index — equivalent to
# merge(how='left', left_index=True, right_index=True).
class_df = train_class_df.join(val_class_df, how = 'left').join(test_class_df, how = 'left')
class_df.index.rename('Car_Class', inplace = True)
display(class_df.head(10))
| Training_Set | Validation_Set | Test_Set | |
|---|---|---|---|
| Car_Class | |||
| GMC Savana Van 2012 | 68 | 6 | 62 |
| Chrysler 300 SRT-8 2010 | 49 | 6 | 42 |
| Mercedes-Benz 300-Class Convertible 1993 | 48 | 6 | 42 |
| Mitsubishi Lancer Sedan 2012 | 48 | 6 | 41 |
| Jaguar XK XKR 2012 | 47 | 6 | 40 |
| Chevrolet Corvette ZR1 2012 | 47 | 6 | 40 |
| Dodge Durango SUV 2007 | 46 | 6 | 39 |
| Volvo 240 Sedan 1993 | 46 | 6 | 39 |
| Volkswagen Golf Hatchback 1991 | 46 | 6 | 40 |
| Ford GT Coupe 2006 | 46 | 6 | 39 |
# Local directories the dataset zip was extracted to.
train_img_path = '/content/train_images/'
# NOTE: the validation path deliberately points at the test images folder —
# the validation split was carved out of the original test set above.
val_img_path = '/content/test_images/'
test_img_path = '/content/test_images/'
def viz_data(name, X, y_label, X_dtype, mode, num_images, num_cols, col_size, row_size, bm_name = None,
             plot_bbox = False, y_bbox = []):
    '''
    Plot random images from an input array along with corresponding labels.
    Arguments:
        name: Name to print in title (Training_Set, Test_Set etc.)
        X: Image array (should be in (batch, height, width, channel) format)
        y_label: Label array (raw labels - should not be One-Hot encoded)
        X_dtype: Data type of image array. One of 'int' or 'float' (lowercase;
                 the docstring previously said 'Int'/'Float' which never matched)
        mode: One of 'grayscale' or 'color'
        num_images: Number of images to plot from input array
        num_cols: Number of columns to use for plotting
        col_size: Size of columns to use for plotting
        row_size: Size of rows to use for plotting
        bm_name: Name of base model that will be used to undo pre-processing (if required)
        plot_bbox: Boolean. If True, plot bounding box co-ordinates on top of image
        y_bbox: Array of normalized (xmin, ymin, xmax, ymax) BBOX co-ordinates;
                required when plot_bbox is True
    Raises:
        ValueError: if bm_name is None and X_dtype is neither 'int' nor 'float'
                    (previously this fell through to an UnboundLocalError)
    '''
    num_rows = math.ceil(num_images / num_cols) # Number of rows to use for plotting
    fig = plt.figure(figsize = ((num_cols * col_size), (num_rows * row_size)))
    fig.suptitle('Random sample images from ' + name, fontsize = 40)
    # Generate random sample indices (sampling with replacement)
    samp_index = np.random.randint(low = 0, high = X.shape[0], size = num_images).tolist()
    for ind, value in enumerate(samp_index):
        if bm_name is None: # No preprocessing used
            if X_dtype == 'int':
                img = (X[value].squeeze()).astype('uint8')   # Force type to uint8
            elif X_dtype == 'float':
                img = (X[value].squeeze()).astype('float32') # Force type to float32
            else:
                raise ValueError("X_dtype must be 'int' or 'float', got %r" % (X_dtype,))
        else: # bm_name given => pre-processing has to be undone before display
            img = (X[value].squeeze()).astype('float32')
            img = undo_preprocess_data(img, bm_name) # Undo any pre-processing done on image
            img = img.astype('uint8')
        label = y_label[value] # Extract label
        if plot_bbox:
            (xmin_norm, ymin_norm, xmax_norm, ymax_norm) = y_bbox[value] # Normalized BBOX coords
            img_h = img.shape[0] # Image height
            img_w = img.shape[1] # Image width
            # Re-scale normalized BBOX coords to pixel co-ordinates
            xmin = int(xmin_norm * img_w)
            xmax = int(xmax_norm * img_w)
            ymin = int(ymin_norm * img_h)
            ymax = int(ymax_norm * img_h)
            # Scale box thickness with image area so boxes stay visible on large images
            box_thickness = int(np.ceil(((img_h) * (img_w)) / (100000)))
            # Draw BBOX in green
            cv2.rectangle(img, (xmin, ymin), (xmax, ymax), (0, 255, 0), 2 * box_thickness)
        ax = plt.subplot(num_rows, num_cols, (ind + 1))
        if mode == 'grayscale':
            ax.imshow(img, cmap = 'gray') # Plot image in grayscale
        elif mode == 'color':
            ax.imshow(img) # Plot image in color
        ax.set_title(f"{label}", fontsize = 17)
        ax.grid(False)
    plt.show()
def viz_img_df(name, df, x_col, y_col, img_root_path, img_cons, plot_bbox,
               num_images, num_cols, col_size, row_size):
    '''
    Plot random images described by an annotation dataframe along with their
    class names and (optionally) their ground-truth bounding boxes.
    Arguments:
        name: Name to print in title (Training_Set, Test_Set etc.)
        df: Dataframe to read image details from
        x_col: Name of column in dataframe that contains file names
        y_col: Name of column in dataframe that contains the class names
        img_root_path: Root directory path where images are stored
        img_cons: Boolean. If True, all images are assumed to be consolidated in
                  img_root_path (no sub-directories). If False, images are assumed
                  to be inside sub-directories of img_root_path named by class.
        plot_bbox: Boolean. If True, plot bounding boxes on top of the image.
                   Should be set to True only if df has BBOX co-ords
                   ('xmin', 'ymin', 'xmax', 'ymax', 'img_h', 'img_w')
        num_images: Number of images to plot
        num_cols: Number of columns to use for plotting
        col_size: Size of columns to use for plotting
        row_size: Size of rows to use for plotting
    '''
    num_rows = math.ceil(num_images / num_cols) # Number of rows to use for plotting
    fig = plt.figure(figsize = ((num_cols * col_size), (num_rows * row_size)))
    fig.suptitle('Random sample images from ' + name, fontsize = 40)
    # Generate random sample indices (sampling with replacement)
    samp_indices = np.random.randint(low = 0, high = df.shape[0], size = num_images).tolist()
    for ind, value in enumerate(samp_indices):
        img_file_name = df.iloc[value][x_col] # File name of image
        img_class = df.iloc[value][y_col]     # Class of image
        if img_cons: # All images consolidated directly inside img_root_path
            img_file_path = os.path.join(img_root_path, img_file_name)
        else:
            # Image lives in a per-class sub-directory of img_root_path.
            # Bug fix: was os.path.join(img_root,path, img_class, img_file_path),
            # which referenced undefined names and always crashed.
            img_file_path = os.path.join(img_root_path, img_class, img_file_name)
        img = cv2.imread(img_file_path, cv2.IMREAD_COLOR) # cv2 loads 3 channels in BGR order
        if plot_bbox:
            img_h, img_w = df.iloc[value]['img_h'], df.iloc[value]['img_w'] # Image dimensions
            xmin, ymin, xmax, ymax = df.iloc[value]['xmin'], df.iloc[value]['ymin'],\
                                     df.iloc[value]['xmax'], df.iloc[value]['ymax'] # Ground-truth BBOX
            # Scale box thickness with image area so boxes stay visible on large images
            box_thickness = int(np.ceil(((img_h) * (img_w)) / (100000)))
            # Draw ground-truth BBOX in green ((0, 255, 0) is green in both BGR and RGB)
            cv2.rectangle(img, (xmin, ymin), (xmax, ymax), (0, 255, 0), box_thickness)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # Convert BGR -> RGB for matplotlib
        ax = plt.subplot(num_rows, num_cols, (ind + 1)) # Define plotting axes
        ax.imshow(img) # Plot image in color
        ax.set_title(f"{img_class}", fontsize = 20) # Set title
        ax.grid(False)
    plt.show()
# Plot random images (with ground-truth boxes) from the training set
viz_img_df('Training_Set', train_df, 'filename', 'class', train_img_path, True, True, 8, 2, 8, 8)
# Plot random images from the validation set (its images live in the test_images folder)
viz_img_df('Validation_Set', val_df, 'filename', 'class', val_img_path, True, True, 8, 2, 8, 8)
# Plot random images from the test set
viz_img_df('Test_Set', test_df, 'filename', 'class', test_img_path, True, True, 8, 2, 8, 8)
def plot_img_transf(df, x_col, img_root_path, img_cons, transf_dict, plot_bbox, num_cols, col_size, row_size):
    '''
    Plot the individual image transformations that will be used for data
    augmentation, applied to one random image, so their strength can be
    sanity-checked visually before training.
    Arguments:
        df: Dataframe to read image details from
        x_col: Name of column in dataframe that contains file names
        img_root_path: Root directory path where images are stored
        img_cons: Boolean. Must be True: all images are assumed to be consolidated
                  directly in img_root_path (this function has no class column, so
                  per-class sub-directory lookups are not possible)
        transf_dict: Dictionary of image transformations to be checked. Recognized
                     keys: 'Fliplr', 'Rotate', 'Shear', 'X_shift', 'Y_shift',
                     'X_zoom', 'Y_zoom'
        plot_bbox: Boolean. If True, plot transformed bounding boxes on top of the
                   image. Should be set to True only if df has BBOX co-ords
        num_cols: Number of columns to use for plotting
        col_size: Size of columns to use for plotting
        row_size: Size of rows to use for plotting
    Raises:
        ValueError: if img_cons is False (the old branch referenced undefined
                    names and always crashed)
    '''
    # Generate one random sample index
    samp_index = np.random.randint(low = 0, high = df.shape[0], size = 1)[0]
    img_transf = {} # Original and transformed images, keyed by display title
    aug_dict = {}   # One imgaug augmenter per requested transformation
    # Obtain base image
    img_file_name = df.iloc[samp_index][x_col]
    if img_cons: # All images consolidated directly inside img_root_path
        img_file_path = os.path.join(img_root_path, img_file_name)
    else:
        # Bug fix: previously built os.path.join(img_root,path, img_class, img_file_path),
        # all undefined here — fail loudly instead.
        raise ValueError("img_cons=False is not supported: no class column is available "
                         "to build per-class sub-directory paths")
    img = cv2.imread(img_file_path, cv2.IMREAD_COLOR) # cv2 loads 3 channels in BGR order
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)        # Convert to RGB for display
    img_batch = np.expand_dims(img, axis = 0)         # Add batch axis for imgaug
    # Extract BBOX co-ordinates if boxes must be drawn
    if plot_bbox:
        (xmin, ymin, xmax, ymax) = (df.iloc[samp_index]['xmin'], df.iloc[samp_index]['ymin'],
                                    df.iloc[samp_index]['xmax'], df.iloc[samp_index]['ymax'])
        bbs = BoundingBoxesOnImage([BoundingBox(x1 = xmin, y1 = ymin, x2 = xmax, y2 = ymax)], shape = img.shape)
        img_transf['Original_Image'] = bbs.draw_on_image(img, size = 4, color = (0, 0, 255))
    else:
        img_transf['Original_Image'] = img
    # Build an augmenter for every transformation requested in transf_dict
    # (each symmetric transform gets both a positive and a negative variant).
    if 'Fliplr' in transf_dict:
        aug_dict['Horizontal Flip'] = iaa.Fliplr(1.0)
    if 'Rotate' in transf_dict:
        aug_dict['Clockwise Rotation'] = iaa.Affine(rotate = transf_dict['Rotate'])
        aug_dict['Anti-Clockwise Rotation'] = iaa.Affine(rotate = (-transf_dict['Rotate']))
    if 'Shear' in transf_dict:
        aug_dict['Right Shear'] = iaa.Affine(shear = transf_dict['Shear'])
        aug_dict['Left Shear'] = iaa.Affine(shear = (-transf_dict['Shear']))
    if 'X_shift' in transf_dict:
        aug_dict['Right_Shift'] = iaa.Affine(translate_percent = {"x": transf_dict['X_shift']})
        aug_dict['Left_Shift'] = iaa.Affine(translate_percent = {"x": (-transf_dict['X_shift'])})
    if 'Y_shift' in transf_dict:
        aug_dict['Downward_Shift'] = iaa.Affine(translate_percent = {"y": transf_dict['Y_shift']})
        aug_dict['Upward_Shift'] = iaa.Affine(translate_percent = {"y": (-transf_dict['Y_shift'])})
    if 'X_zoom' in transf_dict:
        aug_dict['X_zoom'] = iaa.Affine(scale = {"x": transf_dict['X_zoom']})
    if 'Y_zoom' in transf_dict:
        aug_dict['Y_zoom'] = iaa.Affine(scale = {"y": transf_dict['Y_zoom']})
    # Apply every augmenter to the base image (and its box, if requested)
    for key, value in aug_dict.items():
        if plot_bbox:
            img_aug, bbs_aug = value(images = img_batch, bounding_boxes = bbs)
            img_aug = bbs_aug.draw_on_image(img_aug.squeeze(), size = 4, color = (0, 0, 255))
        else:
            img_aug = value(images = img_batch)
        img_transf[key] = img_aug.squeeze()
    # Plot the original image followed by every transformed version
    num_images = len(img_transf)
    num_rows = math.ceil(num_images / num_cols)
    fig = plt.figure(figsize = ((num_cols * col_size), (num_rows * row_size)))
    fig.suptitle('Original image along with some affine transformations', fontsize = 40)
    for ind, (key, image) in enumerate(img_transf.items()):
        ax = plt.subplot(num_rows, num_cols, (ind + 1))
        ax.imshow(image)        # Plot image in color
        ax.set_title(key, fontsize = 25) # Set transformation name as title
    plt.show()
transf_dict = {'Fliplr': 1.0,    # Horizontal flip with 100% probability
               'Rotate': 15,     # Rotate image by +/- 15 degrees
               'Shear': 15,      # Shear image by +/- 15 degrees (comment previously said 10)
               'X_shift': 0.15,  # Left / Right shift of 15% (comment previously said 20%)
               'Y_shift': 0.15,  # Up / Down shift of 15%
               'X_zoom': 1.15,   # Zoom image along x-axis by 15%
               'Y_zoom': 1.15,   # Zoom image along y-axis by 15%
               }
# Preview each transformation (with its bounding box) on one random training image
plot_img_transf(train_df, 'filename', train_img_path, True, transf_dict, True,
                num_cols = 2, col_size = 8, row_size = 8)
### The base model defined here (bm_name) will be used to choose the appropriate pre-processing function
### to apply to the input images in the Image Data Generators
### If bm_name = 'VGG16' or 'ResNet50':
###     Only mean shift is applied
###     img_preprocessed = img - [103.939, 116.779, 123.68]
### If bm_name = 'MobileNet' or 'InceptionV3' or 'NASNet' or 'InceptionResNetV2'
###     Image scaled to lie between -1 and +1
###     img_preprocessed = (img / 127.5) - 1
### If bm_name = 'grayscale_model'
###     Image scaled to lie between 0 and +1
###     img_preprocessed = (img / 255.)
# Exactly one of the following should be uncommented at a time:
# bm_name = 'VGG16'
bm_name = 'ResNet50'
# bm_name = 'MobileNet'
# bm_name = 'InceptionV3'
# bm_name = 'DenseNet'
# bm_name = 'NASNet'
# bm_name = 'EfficientNet'
# bm_name = 'InceptionResNetV2'
# bm_name = 'grayscale_model'
# Define target image size and batch size
mod_inp_shape = (224, 224, 3) # Target (height, width, channels) for model input
mod_bat_size = 64 # Batch size to use while model fitting
# Define pre-processing and undo pre-processing function
# Appropriate pre-processing functions from Keras are used.
def preprocess_data(img, model_name = None):
    '''
    Apply the Keras pre-processing function matching the chosen base model.
    Arguments:
        img: Image array to pre-process
        model_name: Optional base-model name; defaults to the module-level
                    `bm_name` (kept for backward compatibility with existing
                    callers, and for consistency with undo_preprocess_data,
                    which already takes the name as an argument)
    Returns:
        Pre-processed image array
    Raises:
        ValueError: for an unrecognized model name (the previous version
                    silently returned None in this case)
    '''
    name = bm_name if model_name is None else model_name
    if name == 'VGG16':
        return vgg16.preprocess_input(img)
    elif name == 'ResNet50':
        return resnet50.preprocess_input(img)
    elif name == 'MobileNet':
        return mobilenet.preprocess_input(img)
    elif name == 'InceptionV3':
        return inception_v3.preprocess_input(img)
    elif name == 'DenseNet':
        return densenet.preprocess_input(img)
    elif name == 'NASNet':
        return nasnet.preprocess_input(img)
    elif name == 'EfficientNet':
        return efficientnet.preprocess_input(img)
    elif name == 'InceptionResNetV2':
        return inception_resnet_v2.preprocess_input(img)
    elif name == 'grayscale_model':
        return img/255.
    raise ValueError(f"Unknown base model name: {name!r}")
def undo_preprocess_data(img, bm_name):
    '''
    Reverse the Keras pre-processing applied by preprocess_data so an image can
    be displayed, returning a uint8 array. Unlike the previous version, the
    caller's array is NOT mutated in place (the old VGG16/ResNet50 and DenseNet
    branches modified `img` with in-place channel arithmetic).
    Arguments:
        img: Pre-processed image array (channels-last for the per-channel modes)
        bm_name: Base-model name whose pre-processing should be undone
    Returns:
        uint8 image array; for an unrecognized bm_name the input is returned unchanged
    '''
    if bm_name in ('VGG16', 'ResNet50'):
        # Mean-shifted BGR: add the per-channel mean back, then flip BGR -> RGB
        mean = np.array([103.939, 116.779, 123.68], dtype = 'float32')
        img = (img.astype('float32') + mean)[..., ::-1].astype('uint8')
    elif bm_name in ('MobileNet', 'InceptionV3', 'NASNet', 'InceptionResNetV2'):
        # Image was scaled to [-1, 1]
        img = ((img + 1) * 127.5).astype('uint8')
    elif bm_name == 'DenseNet':
        # Image was scaled to [0, 1] then normalized per channel
        mean = np.array([0.485, 0.456, 0.406], dtype = 'float32')
        std = np.array([0.229, 0.224, 0.225], dtype = 'float32')
        img = ((img.astype('float32') * std + mean) * 255.0).astype('uint8')
    elif bm_name == 'EfficientNet':
        # EfficientNet pre-processing leaves pixel values unchanged
        img = (img * 1.0).astype('uint8')
    elif bm_name == 'grayscale_model':
        # Image was scaled to [0, 1]
        img = (img * 255.0).astype('uint8')
    return img
sometimes = lambda aug: iaa.Sometimes(0.8, aug) # Apply the wrapped augmenter to ~80% of the images
# Training-time augmentation pipeline: resize to the model input size, then
# geometric transforms, then (for ~80% of images) 3 randomly chosen photometric ops.
seq = iaa.Sequential([iaa.size.Resize(size = mod_inp_shape[0], interpolation = cv2.INTER_CUBIC),
                      iaa.Fliplr(0.5),               # Horizontal flip with 50% probability
                      iaa.Affine(rotate=(-15, 15)),  # Random rotation (degrees)
                      iaa.Affine(shear=(-15, 15)),   # Random shear (degrees)
                      iaa.Affine(translate_percent = {"x": (-0.15, 0.15), "y": (-0.15, 0.15)}),  # Random shift
                      iaa.Affine(scale = {"x": (0.85, 1.15), "y": (0.85, 1.15)}),                # Random zoom
                      sometimes(iaa.SomeOf(3, [       # Pick 3 of the following, in random order
                          iaa.AddToHueAndSaturation((-20, 20)),
                          iaa.Add((-20, 20), per_channel=0.5),
                          iaa.Multiply((0.5, 1.5), per_channel=0.5),
                          iaa.GaussianBlur((0, 1.5)),
                          iaa.ContrastNormalization((0.5, 2.0), per_channel=0.5),  # NOTE(review): deprecated alias of iaa.LinearContrast in newer imgaug
                          iaa.Sharpen(alpha=(0, 0.5), lightness=(0.7, 1.3)),
                          iaa.Emboss(alpha=(0, 0.5), strength=(0, 1.5))
                      ], random_order = True)
                      )
                      ])
# Validation/test-time transform: resize only, no augmentation
seq_rescale = iaa.size.Resize(size = mod_inp_shape[0], interpolation = cv2.INTER_CUBIC)
## Verify augmentation on two sample images
df = train_df.copy()
samp_indices = np.random.randint(0, df.shape[0], 2)
img_batch = []   # Images to feed through the augmenter
bbox_batch = []  # Matching bounding boxes (one BoundingBoxesOnImage per image)
for samp_index in samp_indices:
    img_file_name = df.iloc[samp_index]['filename']
    img_file_path = os.path.join(train_img_path, img_file_name)
    img = cv2.imread(img_file_path, cv2.IMREAD_COLOR) # cv2 loads 3 channels in BGR order
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # Convert image to RGB format
    (xmin_orig, ymin_orig, xmax_orig, ymax_orig) = (df.iloc[samp_index]['xmin'], df.iloc[samp_index]['ymin'],\
                                                    df.iloc[samp_index]['xmax'], df.iloc[samp_index]['ymax'])
    # img_width_orig = df.iloc[samp_index]['img_w'] # Width of original image
    # img_height_orig = df.iloc[samp_index]['img_h'] # Height of original image
    # xmin_norm = (xmin_orig / img_width_orig) # Scaled xmin co-ordinate
    # ymin_norm = (ymin_orig / img_height_orig) # Scaled ymin co-ordinate
    # xmax_norm = (xmax_orig / img_width_orig) # Scaled xmax co-ordinate
    # ymax_norm = (ymax_orig / img_height_orig) # Scaled ymax co-ordinate
    # width_bbox_norm = xmax_norm - xmin_norm # Normalized width of BBOX
    # height_bbox_norm = ymax_norm - ymin_norm # Normalized height of BBOX
    # img = cv2.resize(img, (mod_inp_shape[0], mod_inp_shape[1]), interpolation = cv2.INTER_CUBIC)
    img_batch.append(img)
    # xmin_sc = xmin_norm * mod_inp_shape[0]
    # ymin_sc = ymin_norm * mod_inp_shape[1]
    # xmax_sc = xmax_norm * mod_inp_shape[0]
    # ymax_sc = ymax_norm * mod_inp_shape[1]
    bbs = BoundingBoxesOnImage([BoundingBox(x1 = xmin_orig, y1 = ymin_orig, x2 = xmax_orig, y2 = ymax_orig)],\
                               shape=img.shape)
    bbox_batch.append(bbs)
# Bundle images + boxes and run them through the augmentation pipeline once;
# the Batch object keeps both the un-augmented and augmented versions.
img_bbox_batch = Batch(images = img_batch, bounding_boxes = bbox_batch)
img_bbox_batch_aug = seq.augment_batch(img_bbox_batch)
img_unaug = img_bbox_batch_aug.images_unaug         # Original images
bbs_unaug = img_bbox_batch_aug.bounding_boxes_unaug # Original boxes
img_aug = img_bbox_batch_aug.images_aug             # Augmented images
bbs_aug = img_bbox_batch_aug.bounding_boxes_aug     # Augmented boxes
# Plot each sample: original (left column) vs augmented (right column)
plt.figure(figsize = (16, 16))
for ind in range(len(img_unaug)):
    img = img_unaug[ind]
    bbs = bbs_unaug[ind]
    img_w_bbox = bbs.draw_on_image(img, size = 2, color = (0, 0, 255))
    ax = plt.subplot(2, 2, (2 * ind + 1))
    ax.imshow(img_w_bbox)
    img = img_aug[ind]
    bbs = bbs_aug[ind]
    bbs_clip = bbs.clip_out_of_image() # Clip boxes pushed outside the image by augmentation
    img_w_bbox_aug = bbs_clip.draw_on_image(img, size = 2, color = (0, 0, 255))
    ax = plt.subplot(2, 2, (2 * ind + 2))
    ax.imshow(img_w_bbox_aug)
plt.show()
class batch_generator_from_df(Sequence):
def __init__(self, df, directory, x_col, y_col, gen_bbox, batch_size, target_size, aug = False, shuffle = False,\
             seed = 1234, preprocessing_function = None):
    '''
    Batch generator (keras.utils.Sequence) that reads images from disk as
    described by an annotation dataframe and yields batches for model fitting.
    Arguments:
        df: Dataframe to read image information from
        directory: Directory name to read images from
        x_col: Name of column in dataframe that contains image file names
        y_col: Name of column in dataframe that contains image class names
        gen_bbox: Boolean. If True, bboxes are included in the target output
        batch_size: Batch size to use for each batch
        target_size: Target (height, width) image size
        aug: Boolean. If True, augment images
        shuffle: Boolean. If True, shuffle dataset at start of every epoch
        seed: Random seed to use for shuffling
        preprocessing_function: Preprocessing function to apply to every image
    '''
    self.df = df # Dataframe to read image information from
    self.samples = df.shape[0] # Number of images in dataframe
    self.directory = directory # Directory name to read images from
    self.x_col = x_col # Name of column in dataframe that contains image file names
    self.y_col = y_col # Name of column in dataframe that contains image class names
    self.gen_bbox = gen_bbox # Boolean: If True, bboxes should be included in target output
    self.batch_size = batch_size # Batch size to use for each batch
    self.target_size = target_size # Target image size
    self.image_shape = (self.target_size[0], self.target_size[1], 3) # Output image shape (3-channel)
    self.aug = aug # Boolean: If True, augment images
    self.shuffle = shuffle # Boolean: If True, shuffle dataset at start of every epoch
    self.seed = seed # Random seed to use for shuffling
    self.preprocessing_function = preprocessing_function # Preprocessing function to apply to every image
    self.class_list = sorted(df[y_col].unique()) # Sorted list of class names
    self.num_classes = len(self.class_list) # Number of classes
    # Class-name -> integer-index mapping (derived from the sorted class list)
    self.class_indices = {value:ind for ind, value in enumerate(self.class_list)}
    # Run the start-of-epoch hook once up front (defined later in the class;
    # presumably performs the optional shuffling — see its definition)
    self.on_epoch_end()
    print(f"Found {self.samples} images belonging to {self.num_classes} classes")
def __len__(self):
    # Number of batches per epoch; np.ceil so the final partial batch is included
    return int(np.ceil(float(self.df.shape[0] / self.batch_size)))
def __getitem__(self, index):
### Generate current batch from dataframe
start_ind = index * self.batch_size # Start index of current batch
end_ind = min((index + 1) * self.batch_size, self.df.shape[0]) # End index of current batch
sub_df = self.df[start_ind:end_ind] # Create sub dataframe using start and end indices
### Define placeholders
img_batch = [] # Placeholder to store images during processing
y_batch = [] # Placeholder to store target values
y_label = [] # Placeholder to store target labels
y_bbox = [] # Placeholder to store target BBOX co-ordinates
bbs_batch = [] # Placeholder to store bbs values (required for imgaug)
for _, row in sub_df.iterrows(): # Iterate through all rows in sub_df
### Update y_label
label = row[self.y_col] # Extract label of current row
label_index = self.class_indices[label] # Convert label to label index
label_ohe = to_categorical(label_index, num_classes = self.num_classes) # OHE label_index
y_label.append(label_ohe) # Update y_label with current OHE label_index
### Update X_batch
img_file_name = row[self.x_col] # Extract image file name of current row
img_file_path = os.path.join(self.directory, img_file_name) # Obtain image file path
img = cv2.imread(img_file_path) # Read image from disk
img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) # Convert image to RGB
img_batch.append(img) # Update img_batch with current image
### Update y_bbox and bbs_batch
if (self.gen_bbox): # gen_bbox = True => Generate BBOX co-ordinates also for output
# Get size and BBOX co-ordinates of original image
img_width_orig = row['img_w'] # Width of original image
img_height_orig = row['img_h'] # Height of original image
xmin_orig = row['xmin'] # xmin co-ordinate of BBOX in original image
ymin_orig = row['ymin'] # ymin co-ordinate of BBOX in original image
xmax_orig = row['xmax'] # xmax co-ordinate of BBOX in original image
ymax_orig = row['ymax'] # ymax co-ordinate of BBOX in original image
# Generate bbs object from BBOX coordinates
bbs = BoundingBoxesOnImage([BoundingBox(x1 = xmin_orig, y1 = ymin_orig, x2 = xmax_orig,\
y2 = ymax_orig)], shape=img.shape)
# Append current bbs object to bbs_batch
bbs_batch.append(bbs)
### Generate normalized co-ordinates of original BBOX
xmin_norm = (xmin_orig / img_width_orig) # Normalized xmin co-ordinate
ymin_norm = (ymin_orig / img_height_orig) # Normalized ymin co-ordinate
xmax_norm = (xmax_orig / img_width_orig) # Normalized xmax co-ordinate
ymax_norm = (ymax_orig / img_height_orig) # Normalized ymax co-ordinate
# Append unaugmented BBOX co-ordinates to y_bbox
y_bbox.append([xmin_norm, ymin_norm, xmax_norm, ymax_norm])
### Augment images and BBOXes
if (self.aug): # aug = True => Augment images and BBOxes
if (self.gen_bbox): # Augment both image and bounding box
# Generate imgaug batch object with current batch of images and BBOXes
img_bbox_batch = Batch(images = img_batch, bounding_boxes = bbs_batch)
# Augment entire batch of images and BBOXes
img_bbox_batch_aug = seq.augment_batch(img_bbox_batch)
# Extract list of augmented images
img_batch = img_bbox_batch_aug.images_aug
# Extract list of augmented bbs objects
bbs_aug_batch = img_bbox_batch_aug.bounding_boxes_aug
bbs_aug_list = [] # Placeholder to store augmented bounding box coordinates
for ind in range(len(bbs_aug_batch)): # Loop through all bbs objects in batch
bbs_aug = bbs_aug_batch[ind] # Extract current bbs object
bbs_aug_clip = bbs_aug.clip_out_of_image() # Clip current bbs object to image dimensions
# Extract xy-coords of bbox and append to bbs_list
bbs_aug_list.append(bbs_aug_clip.to_xyxy_array()[0])
bbs_aug_arr = np.array(bbs_aug_list) # Convert bbs_aug_list to array
y_bbox = bbs_aug_arr/bbs_aug.width # Normalize bbs_aug_arr and copy to y_bbox
else: # Augment only images
img_batch = seq.augment_images(images = img_batch)
else: # aug = False => Only re-scale images and BBOXes
if (self.gen_bbox): # Augment both image and bounding box
# Generate imgaug batch object with current batch of images and BBOXes
img_bbox_batch = Batch(images = img_batch, bounding_boxes = bbs_batch)
# Augment entire batch of images and BBOXes
img_bbox_batch_aug = seq_rescale.augment_batch(img_bbox_batch)
# Extract list of augmented images
img_batch = img_bbox_batch_aug.images_aug
# Extract list of augmented bbs objects
bbs_aug_batch = img_bbox_batch_aug.bounding_boxes_aug
bbs_aug_list = [] # Placeholder to store augmented bounding box coordinates
for ind in range(len(bbs_aug_batch)): # Loop through all bbs objects in batch
bbs_aug = bbs_aug_batch[ind] # Extract current bbs object
bbs_aug_clip = bbs_aug.clip_out_of_image() # Clip current bbs object to image dimensions
# Extract xy-coords of bbox and append to bbs_list
bbs_aug_list.append(bbs_aug_clip.to_xyxy_array()[0])
bbs_aug_arr = np.array(bbs_aug_list) # Convert bbs_aug_list to array
y_bbox = bbs_aug_arr/bbs_aug.width # Normalize bbs_aug_arr and copy to y_bbox
else: # Augment only images
img_batch = seq_rescale.augment_images(images = img_batch)
### Batch pre-processing of images
img_batch = np.array(img_batch, dtype = np.float32) # Convert img_batch to array
if (self.preprocessing_function != None): # preprocessing_function != None => Preprocess img_batch
img_batch = self.preprocessing_function(img_batch)
### Generate final processed batch of images
X_batch = img_batch
### Generate final processed labels
y_label = np.array(y_label, dtype = np.float32)
if (self.gen_bbox):
y_bbox = np.array(y_bbox, dtype = np.float32)
y_batch = [y_label, y_bbox]
else:
y_batch = y_label
return X_batch, y_batch
def on_epoch_end(self):
if (self.shuffle): # If 'shuffle' = True, random shuffle the dataframe at the end of the epoch
self.df = self.df.sample(frac = 1, random_state = self.seed).reset_index(drop = True)
# Instantiate the train, val and test batch generators
# NOTE(review): the val generator reads from test_img_path (as in the
# original code) — confirm the validation images really live there.
train_generator = batch_generator_from_df(
    train_df, train_img_path, 'filename', 'class', True, mod_bat_size,
    mod_inp_shape[0:2], aug = True, shuffle = True,
    preprocessing_function = preprocess_data)
val_generator = batch_generator_from_df(
    val_df, test_img_path, 'filename', 'class', True, mod_bat_size,
    mod_inp_shape[0:2], aug = False, shuffle = False,
    preprocessing_function = preprocess_data)
test_generator = batch_generator_from_df(
    test_df, test_img_path, 'filename', 'class', True, mod_bat_size,
    mod_inp_shape[0:2], aug = False, shuffle = False,
    preprocessing_function = preprocess_data)
Found 8144 images belonging to 196 classes Found 1176 images belonging to 196 classes Found 6865 images belonging to 196 classes
# Print some details of train, val and test generators
num_classes = len(train_generator.class_indices)
num_batches_train = len(train_generator)
num_batches_val = len(val_generator)
num_batches_test = len(test_generator)

def print_generator_info(name, generator, show_num_classes = False):
    """
    Print summary information for one batch generator.
    Arguments:
        name - Display name prefix (e.g. 'Train')
        generator - batch_generator_from_df instance to summarize
        show_num_classes - If True, also print the class count
    """
    print(f"{name}_Generator_Information:")
    print("----------------------------")
    print(f"Total number of samples: {generator.samples}")
    if (show_num_classes):
        print(f"Number of classes: {len(generator.class_indices)}")
    print(f"Size of each batch: {generator.batch_size}")
    print(f"Shape of images: {generator.image_shape}")
    print(f"Number of batches: {len(generator)}")

# Same output as the original three copy-pasted blocks
print_generator_info("Train", train_generator, show_num_classes = True)
print()
print_generator_info("Val", val_generator)
print()
print_generator_info("Test", test_generator)
Train_Generator_Information: ---------------------------- Total number of samples: 8144 Number of classes: 196 Size of each batch: 64 Shape of images: (224, 224, 3) Number of batches: 128 Val_Generator_Information: ---------------------------- Total number of samples: 1176 Size of each batch: 64 Shape of images: (224, 224, 3) Number of batches: 19 Test_Generator_Information: ---------------------------- Total number of samples: 6865 Size of each batch: 64 Shape of images: (224, 224, 3) Number of batches: 108
# Extract the class -> index mapping learned by the train generator
class_ind_dict = train_generator.class_indices
# Invert it to obtain an index -> class mapping
# (the original comprehension named the class name 'ind' and the index
# 'value' — renamed here for clarity; same result)
ind_class_dict = {index: cls for cls, index in class_ind_dict.items()}
# Show the first 10 entries as a sanity check
for index, cls in ind_class_dict.items():
    if (index < 10):
        print(f"Index: {index}, Class: {cls}")
Index: 0, Class: AM General Hummer SUV 2000 Index: 1, Class: Acura Integra Type R 2001 Index: 2, Class: Acura RL Sedan 2012 Index: 3, Class: Acura TL Sedan 2012 Index: 4, Class: Acura TL Type-S 2008 Index: 5, Class: Acura TSX Sedan 2012 Index: 6, Class: Acura ZDX Hatchback 2012 Index: 7, Class: Aston Martin V8 Vantage Convertible 2012 Index: 8, Class: Aston Martin V8 Vantage Coupe 2012 Index: 9, Class: Aston Martin Virage Convertible 2012
# Visualize one batch from each generator (train / val / test).
# NOTE(review): the three sections below are copy-paste repetitions and the
# loop variables (X_batch, y_batch, y_label, y_bbox, label) leak into the
# module namespace — later cells may rely on the last (test) batch, so the
# structure is left as-is.
# Generate a batch of data from train_generator
X_batch, y_batch = train_generator.__getitem__(0)
y_label = y_batch[0]            # One-hot class labels
y_bbox = y_batch[1]             # Normalized bbox coordinates
y_label = np.argmax(y_label, axis = 1)  # One-hot -> integer class indices
label = [ind_class_dict[y] for y in y_label]  # Indices -> class names
viz_data('Train_Generator', X_batch, label, 'int', 'color', num_images = 9, num_cols = 3,\
         col_size = 6, row_size = 6, bm_name = bm_name, plot_bbox = True, y_bbox = y_bbox)
print("\n\n")
# Generate a batch of data from validation_generator
X_batch, y_batch = val_generator.__getitem__(0)
y_label = y_batch[0]
y_bbox = y_batch[1]
y_label = np.argmax(y_label, axis = 1)
label = [ind_class_dict[y] for y in y_label]
viz_data('Val_Generator', X_batch, label, 'int', 'color', num_images = 9, num_cols = 3,\
         col_size = 6, row_size = 6, bm_name = bm_name, plot_bbox = True, y_bbox = y_bbox)
print("\n\n")
# Generate a batch of data from test_generator
X_batch, y_batch = test_generator.__getitem__(0)
y_label = y_batch[0]
y_bbox = y_batch[1]
y_label = np.argmax(y_label, axis = 1)
label = [ind_class_dict[y] for y in y_label]
viz_data('Test_Generator', X_batch, label, 'int', 'color', num_images = 9, num_cols = 3,\
         col_size = 6, row_size = 6, bm_name = bm_name, plot_bbox = True, y_bbox = y_bbox)
print("\n\n")
def model_core(model_dict):
    """
    Build a two-headed model (classification + bbox regression) on top of a
    pre-trained backbone selected by model_dict['model_arch'].

    Arguments:
        model_dict - Dictionary with keys:
            'model_arch'    - 'A' (ResNet50), 'B' (DenseNet201),
                              'C' (InceptionResNetV2), 'D' (EfficientNetB4)
            'mod_inp_shape' - Shape of input tensor to model
            'weights'       - Backbone weight initialization (e.g. 'imagenet')
            'trainable'     - Boolean applied to every backbone layer
            'dropout_rate'  - List of 8 dropout rates for layers DR0..DR7
            'num_classes'   - Number of classification output classes
    Returns:
        model - Model with outputs [class_op (softmax), reg_op (sigmoid, 4)]
    Raises:
        ValueError - If 'model_arch' is not one of 'A', 'B', 'C', 'D'
    """
    # All four architectures share the same dict parameters and the same
    # two-head top; they differ only in the backbone, so the original four
    # copy-pasted branches are collapsed into one backbone selection plus a
    # single head definition.
    model_arch = model_dict['model_arch']
    mod_inp_shape = model_dict['mod_inp_shape']
    weights = model_dict['weights']
    trainable = model_dict['trainable']
    dropout_rate = model_dict['dropout_rate']
    num_classes = model_dict['num_classes']
    # Select the backbone (name resolution stays inside each branch, so an
    # unused backbone's module need not be importable)
    if (model_arch == 'A'):
        base_model = resnet50.ResNet50(input_shape = mod_inp_shape, weights = weights,
                                       include_top = False, pooling = 'avg')
    elif (model_arch == 'B'):
        base_model = densenet.DenseNet201(input_shape = mod_inp_shape, weights = weights,
                                          include_top = False, pooling = 'avg')
    elif (model_arch == 'C'):
        # NOTE(review): inception_resnet_v2 is not among the visible
        # top-of-file imports — confirm it is imported elsewhere in the
        # notebook before using architecture 'C'.
        base_model = inception_resnet_v2.InceptionResNetV2(input_shape = mod_inp_shape, weights = weights,
                                                           include_top = False, pooling = 'avg')
    elif (model_arch == 'D'):
        base_model = EfficientNetB4(input_shape = mod_inp_shape, weights = weights,
                                    include_top = False, pooling = 'avg')
    else:
        # Original code fell through to an undefined `model` (NameError);
        # fail explicitly instead.
        raise ValueError(f"Unknown model_arch: {model_arch!r} (expected 'A', 'B', 'C' or 'D')")
    for layer in base_model.layers:  # Set trainability of all backbone layers
        layer.trainable = trainable
    X = base_model.output
    X = Dropout(rate = dropout_rate[0], name = 'DR0')(X)
    # Classification Path
    X1 = Dense(1024, activation = 'relu')(X)
    X1 = Dropout(rate = dropout_rate[1], name = 'DR1')(X1)
    X1 = Dense(512, activation = 'relu')(X1)
    X1 = Dropout(rate = dropout_rate[2], name = 'DR2')(X1)
    X1 = Dense(512, activation = 'relu')(X1)
    X1 = Dropout(rate = dropout_rate[3], name = 'DR3')(X1)
    label_output = Dense(num_classes, activation = 'softmax', name = 'class_op')(X1)
    # BBOX Regression Path (sigmoid: bbox targets are normalized to [0, 1])
    X2 = Dense(1024, activation = 'relu')(X)
    X2 = Dropout(rate = dropout_rate[4], name = 'DR4')(X2)
    X2 = Dense(1024, activation = 'relu')(X2)
    X2 = Dropout(rate = dropout_rate[5], name = 'DR5')(X2)
    X2 = Dense(512, activation = 'relu')(X2)
    X2 = Dropout(rate = dropout_rate[6], name = 'DR6')(X2)
    X2 = Dense(512, activation = 'relu')(X2)
    X2 = Dropout(rate = dropout_rate[7], name = 'DR7')(X2)
    bbox_output = Dense(4, activation = 'sigmoid', name = 'reg_op')(X2)
    model = Model(inputs = base_model.input, outputs = [label_output, bbox_output])
    return model
# Sanity-check the model definition above by building architecture 'C'
# with all dropout disabled and printing its layer summary
model_dict = dict(
    model_arch = 'C',
    mod_inp_shape = mod_inp_shape,
    weights = 'imagenet',
    trainable = True,
    dropout_rate = [0.0] * 8,
    num_classes = num_classes,
)
temp_model = model_core(model_dict)
temp_model.summary()
Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/inception_resnet_v2/inception_resnet_v2_weights_tf_dim_ordering_tf_kernels_notop.h5
219062272/219055592 [==============================] - 3s 0us/step
Model: "model"
__________________________________________________________________________________________________
Layer (type) Output Shape Param # Connected to
==================================================================================================
input_1 (InputLayer) [(None, 224, 224, 3) 0
__________________________________________________________________________________________________
conv2d (Conv2D) (None, 111, 111, 32) 864 input_1[0][0]
__________________________________________________________________________________________________
batch_normalization (BatchNorma (None, 111, 111, 32) 96 conv2d[0][0]
__________________________________________________________________________________________________
activation (Activation) (None, 111, 111, 32) 0 batch_normalization[0][0]
__________________________________________________________________________________________________
conv2d_1 (Conv2D) (None, 109, 109, 32) 9216 activation[0][0]
__________________________________________________________________________________________________
batch_normalization_1 (BatchNor (None, 109, 109, 32) 96 conv2d_1[0][0]
__________________________________________________________________________________________________
activation_1 (Activation) (None, 109, 109, 32) 0 batch_normalization_1[0][0]
__________________________________________________________________________________________________
conv2d_2 (Conv2D) (None, 109, 109, 64) 18432 activation_1[0][0]
__________________________________________________________________________________________________
batch_normalization_2 (BatchNor (None, 109, 109, 64) 192 conv2d_2[0][0]
__________________________________________________________________________________________________
activation_2 (Activation) (None, 109, 109, 64) 0 batch_normalization_2[0][0]
__________________________________________________________________________________________________
max_pooling2d (MaxPooling2D) (None, 54, 54, 64) 0 activation_2[0][0]
__________________________________________________________________________________________________
conv2d_3 (Conv2D) (None, 54, 54, 80) 5120 max_pooling2d[0][0]
__________________________________________________________________________________________________
batch_normalization_3 (BatchNor (None, 54, 54, 80) 240 conv2d_3[0][0]
__________________________________________________________________________________________________
activation_3 (Activation) (None, 54, 54, 80) 0 batch_normalization_3[0][0]
__________________________________________________________________________________________________
conv2d_4 (Conv2D) (None, 52, 52, 192) 138240 activation_3[0][0]
__________________________________________________________________________________________________
batch_normalization_4 (BatchNor (None, 52, 52, 192) 576 conv2d_4[0][0]
__________________________________________________________________________________________________
activation_4 (Activation) (None, 52, 52, 192) 0 batch_normalization_4[0][0]
__________________________________________________________________________________________________
max_pooling2d_1 (MaxPooling2D) (None, 25, 25, 192) 0 activation_4[0][0]
__________________________________________________________________________________________________
conv2d_8 (Conv2D) (None, 25, 25, 64) 12288 max_pooling2d_1[0][0]
__________________________________________________________________________________________________
batch_normalization_8 (BatchNor (None, 25, 25, 64) 192 conv2d_8[0][0]
__________________________________________________________________________________________________
activation_8 (Activation) (None, 25, 25, 64) 0 batch_normalization_8[0][0]
__________________________________________________________________________________________________
conv2d_6 (Conv2D) (None, 25, 25, 48) 9216 max_pooling2d_1[0][0]
__________________________________________________________________________________________________
conv2d_9 (Conv2D) (None, 25, 25, 96) 55296 activation_8[0][0]
__________________________________________________________________________________________________
batch_normalization_6 (BatchNor (None, 25, 25, 48) 144 conv2d_6[0][0]
__________________________________________________________________________________________________
batch_normalization_9 (BatchNor (None, 25, 25, 96) 288 conv2d_9[0][0]
__________________________________________________________________________________________________
activation_6 (Activation) (None, 25, 25, 48) 0 batch_normalization_6[0][0]
__________________________________________________________________________________________________
activation_9 (Activation) (None, 25, 25, 96) 0 batch_normalization_9[0][0]
__________________________________________________________________________________________________
average_pooling2d (AveragePooli (None, 25, 25, 192) 0 max_pooling2d_1[0][0]
__________________________________________________________________________________________________
conv2d_5 (Conv2D) (None, 25, 25, 96) 18432 max_pooling2d_1[0][0]
__________________________________________________________________________________________________
conv2d_7 (Conv2D) (None, 25, 25, 64) 76800 activation_6[0][0]
__________________________________________________________________________________________________
conv2d_10 (Conv2D) (None, 25, 25, 96) 82944 activation_9[0][0]
__________________________________________________________________________________________________
conv2d_11 (Conv2D) (None, 25, 25, 64) 12288 average_pooling2d[0][0]
__________________________________________________________________________________________________
batch_normalization_5 (BatchNor (None, 25, 25, 96) 288 conv2d_5[0][0]
__________________________________________________________________________________________________
batch_normalization_7 (BatchNor (None, 25, 25, 64) 192 conv2d_7[0][0]
__________________________________________________________________________________________________
batch_normalization_10 (BatchNo (None, 25, 25, 96) 288 conv2d_10[0][0]
__________________________________________________________________________________________________
batch_normalization_11 (BatchNo (None, 25, 25, 64) 192 conv2d_11[0][0]
__________________________________________________________________________________________________
activation_5 (Activation) (None, 25, 25, 96) 0 batch_normalization_5[0][0]
__________________________________________________________________________________________________
activation_7 (Activation) (None, 25, 25, 64) 0 batch_normalization_7[0][0]
__________________________________________________________________________________________________
activation_10 (Activation) (None, 25, 25, 96) 0 batch_normalization_10[0][0]
__________________________________________________________________________________________________
activation_11 (Activation) (None, 25, 25, 64) 0 batch_normalization_11[0][0]
__________________________________________________________________________________________________
mixed_5b (Concatenate) (None, 25, 25, 320) 0 activation_5[0][0]
activation_7[0][0]
activation_10[0][0]
activation_11[0][0]
__________________________________________________________________________________________________
conv2d_15 (Conv2D) (None, 25, 25, 32) 10240 mixed_5b[0][0]
__________________________________________________________________________________________________
batch_normalization_15 (BatchNo (None, 25, 25, 32) 96 conv2d_15[0][0]
__________________________________________________________________________________________________
activation_15 (Activation) (None, 25, 25, 32) 0 batch_normalization_15[0][0]
__________________________________________________________________________________________________
conv2d_13 (Conv2D) (None, 25, 25, 32) 10240 mixed_5b[0][0]
__________________________________________________________________________________________________
conv2d_16 (Conv2D) (None, 25, 25, 48) 13824 activation_15[0][0]
__________________________________________________________________________________________________
batch_normalization_13 (BatchNo (None, 25, 25, 32) 96 conv2d_13[0][0]
__________________________________________________________________________________________________
batch_normalization_16 (BatchNo (None, 25, 25, 48) 144 conv2d_16[0][0]
__________________________________________________________________________________________________
activation_13 (Activation) (None, 25, 25, 32) 0 batch_normalization_13[0][0]
__________________________________________________________________________________________________
activation_16 (Activation) (None, 25, 25, 48) 0 batch_normalization_16[0][0]
__________________________________________________________________________________________________
conv2d_12 (Conv2D) (None, 25, 25, 32) 10240 mixed_5b[0][0]
__________________________________________________________________________________________________
conv2d_14 (Conv2D) (None, 25, 25, 32) 9216 activation_13[0][0]
__________________________________________________________________________________________________
conv2d_17 (Conv2D) (None, 25, 25, 64) 27648 activation_16[0][0]
__________________________________________________________________________________________________
batch_normalization_12 (BatchNo (None, 25, 25, 32) 96 conv2d_12[0][0]
__________________________________________________________________________________________________
batch_normalization_14 (BatchNo (None, 25, 25, 32) 96 conv2d_14[0][0]
__________________________________________________________________________________________________
batch_normalization_17 (BatchNo (None, 25, 25, 64) 192 conv2d_17[0][0]
__________________________________________________________________________________________________
activation_12 (Activation) (None, 25, 25, 32) 0 batch_normalization_12[0][0]
__________________________________________________________________________________________________
activation_14 (Activation) (None, 25, 25, 32) 0 batch_normalization_14[0][0]
__________________________________________________________________________________________________
activation_17 (Activation) (None, 25, 25, 64) 0 batch_normalization_17[0][0]
__________________________________________________________________________________________________
block35_1_mixed (Concatenate) (None, 25, 25, 128) 0 activation_12[0][0]
activation_14[0][0]
activation_17[0][0]
__________________________________________________________________________________________________
block35_1_conv (Conv2D) (None, 25, 25, 320) 41280 block35_1_mixed[0][0]
__________________________________________________________________________________________________
block35_1 (Lambda) (None, 25, 25, 320) 0 mixed_5b[0][0]
block35_1_conv[0][0]
__________________________________________________________________________________________________
block35_1_ac (Activation) (None, 25, 25, 320) 0 block35_1[0][0]
__________________________________________________________________________________________________
conv2d_21 (Conv2D) (None, 25, 25, 32) 10240 block35_1_ac[0][0]
__________________________________________________________________________________________________
batch_normalization_21 (BatchNo (None, 25, 25, 32) 96 conv2d_21[0][0]
__________________________________________________________________________________________________
activation_21 (Activation) (None, 25, 25, 32) 0 batch_normalization_21[0][0]
__________________________________________________________________________________________________
conv2d_19 (Conv2D) (None, 25, 25, 32) 10240 block35_1_ac[0][0]
__________________________________________________________________________________________________
conv2d_22 (Conv2D) (None, 25, 25, 48) 13824 activation_21[0][0]
__________________________________________________________________________________________________
batch_normalization_19 (BatchNo (None, 25, 25, 32) 96 conv2d_19[0][0]
__________________________________________________________________________________________________
batch_normalization_22 (BatchNo (None, 25, 25, 48) 144 conv2d_22[0][0]
__________________________________________________________________________________________________
activation_19 (Activation) (None, 25, 25, 32) 0 batch_normalization_19[0][0]
__________________________________________________________________________________________________
activation_22 (Activation) (None, 25, 25, 48) 0 batch_normalization_22[0][0]
__________________________________________________________________________________________________
conv2d_18 (Conv2D) (None, 25, 25, 32) 10240 block35_1_ac[0][0]
__________________________________________________________________________________________________
conv2d_20 (Conv2D) (None, 25, 25, 32) 9216 activation_19[0][0]
__________________________________________________________________________________________________
conv2d_23 (Conv2D) (None, 25, 25, 64) 27648 activation_22[0][0]
__________________________________________________________________________________________________
batch_normalization_18 (BatchNo (None, 25, 25, 32) 96 conv2d_18[0][0]
__________________________________________________________________________________________________
batch_normalization_20 (BatchNo (None, 25, 25, 32) 96 conv2d_20[0][0]
__________________________________________________________________________________________________
batch_normalization_23 (BatchNo (None, 25, 25, 64) 192 conv2d_23[0][0]
__________________________________________________________________________________________________
activation_18 (Activation) (None, 25, 25, 32) 0 batch_normalization_18[0][0]
__________________________________________________________________________________________________
activation_20 (Activation) (None, 25, 25, 32) 0 batch_normalization_20[0][0]
__________________________________________________________________________________________________
activation_23 (Activation) (None, 25, 25, 64) 0 batch_normalization_23[0][0]
__________________________________________________________________________________________________
block35_2_mixed (Concatenate) (None, 25, 25, 128) 0 activation_18[0][0]
activation_20[0][0]
activation_23[0][0]
__________________________________________________________________________________________________
block35_2_conv (Conv2D) (None, 25, 25, 320) 41280 block35_2_mixed[0][0]
__________________________________________________________________________________________________
block35_2 (Lambda) (None, 25, 25, 320) 0 block35_1_ac[0][0]
block35_2_conv[0][0]
__________________________________________________________________________________________________
block35_2_ac (Activation) (None, 25, 25, 320) 0 block35_2[0][0]
__________________________________________________________________________________________________
conv2d_27 (Conv2D) (None, 25, 25, 32) 10240 block35_2_ac[0][0]
__________________________________________________________________________________________________
batch_normalization_27 (BatchNo (None, 25, 25, 32) 96 conv2d_27[0][0]
__________________________________________________________________________________________________
activation_27 (Activation) (None, 25, 25, 32) 0 batch_normalization_27[0][0]
__________________________________________________________________________________________________
conv2d_25 (Conv2D) (None, 25, 25, 32) 10240 block35_2_ac[0][0]
__________________________________________________________________________________________________
conv2d_28 (Conv2D) (None, 25, 25, 48) 13824 activation_27[0][0]
__________________________________________________________________________________________________
batch_normalization_25 (BatchNo (None, 25, 25, 32) 96 conv2d_25[0][0]
__________________________________________________________________________________________________
batch_normalization_28 (BatchNo (None, 25, 25, 48) 144 conv2d_28[0][0]
__________________________________________________________________________________________________
activation_25 (Activation) (None, 25, 25, 32) 0 batch_normalization_25[0][0]
__________________________________________________________________________________________________
activation_28 (Activation) (None, 25, 25, 48) 0 batch_normalization_28[0][0]
__________________________________________________________________________________________________
conv2d_24 (Conv2D) (None, 25, 25, 32) 10240 block35_2_ac[0][0]
__________________________________________________________________________________________________
conv2d_26 (Conv2D) (None, 25, 25, 32) 9216 activation_25[0][0]
__________________________________________________________________________________________________
conv2d_29 (Conv2D) (None, 25, 25, 64) 27648 activation_28[0][0]
__________________________________________________________________________________________________
batch_normalization_24 (BatchNo (None, 25, 25, 32) 96 conv2d_24[0][0]
__________________________________________________________________________________________________
batch_normalization_26 (BatchNo (None, 25, 25, 32) 96 conv2d_26[0][0]
__________________________________________________________________________________________________
batch_normalization_29 (BatchNo (None, 25, 25, 64) 192 conv2d_29[0][0]
__________________________________________________________________________________________________
activation_24 (Activation) (None, 25, 25, 32) 0 batch_normalization_24[0][0]
__________________________________________________________________________________________________
activation_26 (Activation) (None, 25, 25, 32) 0 batch_normalization_26[0][0]
__________________________________________________________________________________________________
activation_29 (Activation) (None, 25, 25, 64) 0 batch_normalization_29[0][0]
__________________________________________________________________________________________________
block35_3_mixed (Concatenate) (None, 25, 25, 128) 0 activation_24[0][0]
activation_26[0][0]
activation_29[0][0]
__________________________________________________________________________________________________
block35_3_conv (Conv2D) (None, 25, 25, 320) 41280 block35_3_mixed[0][0]
__________________________________________________________________________________________________
block35_3 (Lambda) (None, 25, 25, 320) 0 block35_2_ac[0][0]
block35_3_conv[0][0]
__________________________________________________________________________________________________
block35_3_ac (Activation) (None, 25, 25, 320) 0 block35_3[0][0]
__________________________________________________________________________________________________
conv2d_33 (Conv2D) (None, 25, 25, 32) 10240 block35_3_ac[0][0]
__________________________________________________________________________________________________
batch_normalization_33 (BatchNo (None, 25, 25, 32) 96 conv2d_33[0][0]
__________________________________________________________________________________________________
activation_33 (Activation) (None, 25, 25, 32) 0 batch_normalization_33[0][0]
__________________________________________________________________________________________________
conv2d_31 (Conv2D) (None, 25, 25, 32) 10240 block35_3_ac[0][0]
__________________________________________________________________________________________________
conv2d_34 (Conv2D) (None, 25, 25, 48) 13824 activation_33[0][0]
__________________________________________________________________________________________________
batch_normalization_31 (BatchNo (None, 25, 25, 32) 96 conv2d_31[0][0]
__________________________________________________________________________________________________
batch_normalization_34 (BatchNo (None, 25, 25, 48) 144 conv2d_34[0][0]
__________________________________________________________________________________________________
activation_31 (Activation) (None, 25, 25, 32) 0 batch_normalization_31[0][0]
__________________________________________________________________________________________________
activation_34 (Activation) (None, 25, 25, 48) 0 batch_normalization_34[0][0]
__________________________________________________________________________________________________
conv2d_30 (Conv2D) (None, 25, 25, 32) 10240 block35_3_ac[0][0]
__________________________________________________________________________________________________
conv2d_32 (Conv2D) (None, 25, 25, 32) 9216 activation_31[0][0]
__________________________________________________________________________________________________
conv2d_35 (Conv2D) (None, 25, 25, 64) 27648 activation_34[0][0]
__________________________________________________________________________________________________
batch_normalization_30 (BatchNo (None, 25, 25, 32) 96 conv2d_30[0][0]
__________________________________________________________________________________________________
batch_normalization_32 (BatchNo (None, 25, 25, 32) 96 conv2d_32[0][0]
__________________________________________________________________________________________________
batch_normalization_35 (BatchNo (None, 25, 25, 64) 192 conv2d_35[0][0]
__________________________________________________________________________________________________
activation_30 (Activation) (None, 25, 25, 32) 0 batch_normalization_30[0][0]
__________________________________________________________________________________________________
activation_32 (Activation) (None, 25, 25, 32) 0 batch_normalization_32[0][0]
__________________________________________________________________________________________________
activation_35 (Activation) (None, 25, 25, 64) 0 batch_normalization_35[0][0]
__________________________________________________________________________________________________
block35_4_mixed (Concatenate) (None, 25, 25, 128) 0 activation_30[0][0]
activation_32[0][0]
activation_35[0][0]
__________________________________________________________________________________________________
block35_4_conv (Conv2D) (None, 25, 25, 320) 41280 block35_4_mixed[0][0]
__________________________________________________________________________________________________
block35_4 (Lambda) (None, 25, 25, 320) 0 block35_3_ac[0][0]
block35_4_conv[0][0]
__________________________________________________________________________________________________
block35_4_ac (Activation) (None, 25, 25, 320) 0 block35_4[0][0]
__________________________________________________________________________________________________
conv2d_39 (Conv2D) (None, 25, 25, 32) 10240 block35_4_ac[0][0]
__________________________________________________________________________________________________
batch_normalization_39 (BatchNo (None, 25, 25, 32) 96 conv2d_39[0][0]
__________________________________________________________________________________________________
activation_39 (Activation) (None, 25, 25, 32) 0 batch_normalization_39[0][0]
__________________________________________________________________________________________________
conv2d_37 (Conv2D) (None, 25, 25, 32) 10240 block35_4_ac[0][0]
__________________________________________________________________________________________________
conv2d_40 (Conv2D) (None, 25, 25, 48) 13824 activation_39[0][0]
__________________________________________________________________________________________________
batch_normalization_37 (BatchNo (None, 25, 25, 32) 96 conv2d_37[0][0]
__________________________________________________________________________________________________
batch_normalization_40 (BatchNo (None, 25, 25, 48) 144 conv2d_40[0][0]
__________________________________________________________________________________________________
activation_37 (Activation) (None, 25, 25, 32) 0 batch_normalization_37[0][0]
__________________________________________________________________________________________________
activation_40 (Activation) (None, 25, 25, 48) 0 batch_normalization_40[0][0]
__________________________________________________________________________________________________
conv2d_36 (Conv2D) (None, 25, 25, 32) 10240 block35_4_ac[0][0]
__________________________________________________________________________________________________
conv2d_38 (Conv2D) (None, 25, 25, 32) 9216 activation_37[0][0]
__________________________________________________________________________________________________
conv2d_41 (Conv2D) (None, 25, 25, 64) 27648 activation_40[0][0]
__________________________________________________________________________________________________
batch_normalization_36 (BatchNo (None, 25, 25, 32) 96 conv2d_36[0][0]
__________________________________________________________________________________________________
batch_normalization_38 (BatchNo (None, 25, 25, 32) 96 conv2d_38[0][0]
__________________________________________________________________________________________________
batch_normalization_41 (BatchNo (None, 25, 25, 64) 192 conv2d_41[0][0]
__________________________________________________________________________________________________
activation_36 (Activation) (None, 25, 25, 32) 0 batch_normalization_36[0][0]
__________________________________________________________________________________________________
activation_38 (Activation) (None, 25, 25, 32) 0 batch_normalization_38[0][0]
__________________________________________________________________________________________________
activation_41 (Activation) (None, 25, 25, 64) 0 batch_normalization_41[0][0]
__________________________________________________________________________________________________
block35_5_mixed (Concatenate) (None, 25, 25, 128) 0 activation_36[0][0]
activation_38[0][0]
activation_41[0][0]
__________________________________________________________________________________________________
block35_5_conv (Conv2D) (None, 25, 25, 320) 41280 block35_5_mixed[0][0]
__________________________________________________________________________________________________
block35_5 (Lambda) (None, 25, 25, 320) 0 block35_4_ac[0][0]
block35_5_conv[0][0]
__________________________________________________________________________________________________
block35_5_ac (Activation) (None, 25, 25, 320) 0 block35_5[0][0]
__________________________________________________________________________________________________
conv2d_45 (Conv2D) (None, 25, 25, 32) 10240 block35_5_ac[0][0]
__________________________________________________________________________________________________
batch_normalization_45 (BatchNo (None, 25, 25, 32) 96 conv2d_45[0][0]
__________________________________________________________________________________________________
activation_45 (Activation) (None, 25, 25, 32) 0 batch_normalization_45[0][0]
__________________________________________________________________________________________________
conv2d_43 (Conv2D) (None, 25, 25, 32) 10240 block35_5_ac[0][0]
__________________________________________________________________________________________________
conv2d_46 (Conv2D) (None, 25, 25, 48) 13824 activation_45[0][0]
__________________________________________________________________________________________________
batch_normalization_43 (BatchNo (None, 25, 25, 32) 96 conv2d_43[0][0]
__________________________________________________________________________________________________
batch_normalization_46 (BatchNo (None, 25, 25, 48) 144 conv2d_46[0][0]
__________________________________________________________________________________________________
activation_43 (Activation) (None, 25, 25, 32) 0 batch_normalization_43[0][0]
__________________________________________________________________________________________________
activation_46 (Activation) (None, 25, 25, 48) 0 batch_normalization_46[0][0]
__________________________________________________________________________________________________
conv2d_42 (Conv2D) (None, 25, 25, 32) 10240 block35_5_ac[0][0]
__________________________________________________________________________________________________
conv2d_44 (Conv2D) (None, 25, 25, 32) 9216 activation_43[0][0]
__________________________________________________________________________________________________
conv2d_47 (Conv2D) (None, 25, 25, 64) 27648 activation_46[0][0]
__________________________________________________________________________________________________
batch_normalization_42 (BatchNo (None, 25, 25, 32) 96 conv2d_42[0][0]
__________________________________________________________________________________________________
batch_normalization_44 (BatchNo (None, 25, 25, 32) 96 conv2d_44[0][0]
__________________________________________________________________________________________________
batch_normalization_47 (BatchNo (None, 25, 25, 64) 192 conv2d_47[0][0]
__________________________________________________________________________________________________
activation_42 (Activation) (None, 25, 25, 32) 0 batch_normalization_42[0][0]
__________________________________________________________________________________________________
activation_44 (Activation) (None, 25, 25, 32) 0 batch_normalization_44[0][0]
__________________________________________________________________________________________________
activation_47 (Activation) (None, 25, 25, 64) 0 batch_normalization_47[0][0]
__________________________________________________________________________________________________
block35_6_mixed (Concatenate) (None, 25, 25, 128) 0 activation_42[0][0]
activation_44[0][0]
activation_47[0][0]
__________________________________________________________________________________________________
block35_6_conv (Conv2D) (None, 25, 25, 320) 41280 block35_6_mixed[0][0]
__________________________________________________________________________________________________
block35_6 (Lambda) (None, 25, 25, 320) 0 block35_5_ac[0][0]
block35_6_conv[0][0]
__________________________________________________________________________________________________
block35_6_ac (Activation) (None, 25, 25, 320) 0 block35_6[0][0]
__________________________________________________________________________________________________
conv2d_51 (Conv2D) (None, 25, 25, 32) 10240 block35_6_ac[0][0]
__________________________________________________________________________________________________
batch_normalization_51 (BatchNo (None, 25, 25, 32) 96 conv2d_51[0][0]
__________________________________________________________________________________________________
activation_51 (Activation) (None, 25, 25, 32) 0 batch_normalization_51[0][0]
__________________________________________________________________________________________________
conv2d_49 (Conv2D) (None, 25, 25, 32) 10240 block35_6_ac[0][0]
__________________________________________________________________________________________________
conv2d_52 (Conv2D) (None, 25, 25, 48) 13824 activation_51[0][0]
__________________________________________________________________________________________________
batch_normalization_49 (BatchNo (None, 25, 25, 32) 96 conv2d_49[0][0]
__________________________________________________________________________________________________
batch_normalization_52 (BatchNo (None, 25, 25, 48) 144 conv2d_52[0][0]
__________________________________________________________________________________________________
activation_49 (Activation) (None, 25, 25, 32) 0 batch_normalization_49[0][0]
__________________________________________________________________________________________________
activation_52 (Activation) (None, 25, 25, 48) 0 batch_normalization_52[0][0]
__________________________________________________________________________________________________
conv2d_48 (Conv2D) (None, 25, 25, 32) 10240 block35_6_ac[0][0]
__________________________________________________________________________________________________
conv2d_50 (Conv2D) (None, 25, 25, 32) 9216 activation_49[0][0]
__________________________________________________________________________________________________
conv2d_53 (Conv2D) (None, 25, 25, 64) 27648 activation_52[0][0]
__________________________________________________________________________________________________
batch_normalization_48 (BatchNo (None, 25, 25, 32) 96 conv2d_48[0][0]
__________________________________________________________________________________________________
batch_normalization_50 (BatchNo (None, 25, 25, 32) 96 conv2d_50[0][0]
__________________________________________________________________________________________________
batch_normalization_53 (BatchNo (None, 25, 25, 64) 192 conv2d_53[0][0]
__________________________________________________________________________________________________
activation_48 (Activation) (None, 25, 25, 32) 0 batch_normalization_48[0][0]
__________________________________________________________________________________________________
activation_50 (Activation) (None, 25, 25, 32) 0 batch_normalization_50[0][0]
__________________________________________________________________________________________________
activation_53 (Activation) (None, 25, 25, 64) 0 batch_normalization_53[0][0]
__________________________________________________________________________________________________
block35_7_mixed (Concatenate) (None, 25, 25, 128) 0 activation_48[0][0]
activation_50[0][0]
activation_53[0][0]
__________________________________________________________________________________________________
block35_7_conv (Conv2D) (None, 25, 25, 320) 41280 block35_7_mixed[0][0]
__________________________________________________________________________________________________
block35_7 (Lambda) (None, 25, 25, 320) 0 block35_6_ac[0][0]
block35_7_conv[0][0]
__________________________________________________________________________________________________
block35_7_ac (Activation) (None, 25, 25, 320) 0 block35_7[0][0]
__________________________________________________________________________________________________
conv2d_57 (Conv2D) (None, 25, 25, 32) 10240 block35_7_ac[0][0]
__________________________________________________________________________________________________
batch_normalization_57 (BatchNo (None, 25, 25, 32) 96 conv2d_57[0][0]
__________________________________________________________________________________________________
activation_57 (Activation) (None, 25, 25, 32) 0 batch_normalization_57[0][0]
__________________________________________________________________________________________________
conv2d_55 (Conv2D) (None, 25, 25, 32) 10240 block35_7_ac[0][0]
__________________________________________________________________________________________________
conv2d_58 (Conv2D) (None, 25, 25, 48) 13824 activation_57[0][0]
__________________________________________________________________________________________________
batch_normalization_55 (BatchNo (None, 25, 25, 32) 96 conv2d_55[0][0]
__________________________________________________________________________________________________
batch_normalization_58 (BatchNo (None, 25, 25, 48) 144 conv2d_58[0][0]
__________________________________________________________________________________________________
activation_55 (Activation) (None, 25, 25, 32) 0 batch_normalization_55[0][0]
__________________________________________________________________________________________________
activation_58 (Activation) (None, 25, 25, 48) 0 batch_normalization_58[0][0]
__________________________________________________________________________________________________
conv2d_54 (Conv2D) (None, 25, 25, 32) 10240 block35_7_ac[0][0]
__________________________________________________________________________________________________
conv2d_56 (Conv2D) (None, 25, 25, 32) 9216 activation_55[0][0]
__________________________________________________________________________________________________
conv2d_59 (Conv2D) (None, 25, 25, 64) 27648 activation_58[0][0]
__________________________________________________________________________________________________
batch_normalization_54 (BatchNo (None, 25, 25, 32) 96 conv2d_54[0][0]
__________________________________________________________________________________________________
batch_normalization_56 (BatchNo (None, 25, 25, 32) 96 conv2d_56[0][0]
__________________________________________________________________________________________________
batch_normalization_59 (BatchNo (None, 25, 25, 64) 192 conv2d_59[0][0]
__________________________________________________________________________________________________
activation_54 (Activation) (None, 25, 25, 32) 0 batch_normalization_54[0][0]
__________________________________________________________________________________________________
activation_56 (Activation) (None, 25, 25, 32) 0 batch_normalization_56[0][0]
__________________________________________________________________________________________________
activation_59 (Activation) (None, 25, 25, 64) 0 batch_normalization_59[0][0]
__________________________________________________________________________________________________
block35_8_mixed (Concatenate) (None, 25, 25, 128) 0 activation_54[0][0]
activation_56[0][0]
activation_59[0][0]
__________________________________________________________________________________________________
block35_8_conv (Conv2D) (None, 25, 25, 320) 41280 block35_8_mixed[0][0]
__________________________________________________________________________________________________
block35_8 (Lambda) (None, 25, 25, 320) 0 block35_7_ac[0][0]
block35_8_conv[0][0]
__________________________________________________________________________________________________
block35_8_ac (Activation) (None, 25, 25, 320) 0 block35_8[0][0]
__________________________________________________________________________________________________
conv2d_63 (Conv2D) (None, 25, 25, 32) 10240 block35_8_ac[0][0]
__________________________________________________________________________________________________
batch_normalization_63 (BatchNo (None, 25, 25, 32) 96 conv2d_63[0][0]
__________________________________________________________________________________________________
activation_63 (Activation) (None, 25, 25, 32) 0 batch_normalization_63[0][0]
__________________________________________________________________________________________________
conv2d_61 (Conv2D) (None, 25, 25, 32) 10240 block35_8_ac[0][0]
__________________________________________________________________________________________________
conv2d_64 (Conv2D) (None, 25, 25, 48) 13824 activation_63[0][0]
__________________________________________________________________________________________________
batch_normalization_61 (BatchNo (None, 25, 25, 32) 96 conv2d_61[0][0]
__________________________________________________________________________________________________
batch_normalization_64 (BatchNo (None, 25, 25, 48) 144 conv2d_64[0][0]
__________________________________________________________________________________________________
activation_61 (Activation) (None, 25, 25, 32) 0 batch_normalization_61[0][0]
__________________________________________________________________________________________________
activation_64 (Activation) (None, 25, 25, 48) 0 batch_normalization_64[0][0]
__________________________________________________________________________________________________
conv2d_60 (Conv2D) (None, 25, 25, 32) 10240 block35_8_ac[0][0]
__________________________________________________________________________________________________
conv2d_62 (Conv2D) (None, 25, 25, 32) 9216 activation_61[0][0]
__________________________________________________________________________________________________
conv2d_65 (Conv2D) (None, 25, 25, 64) 27648 activation_64[0][0]
__________________________________________________________________________________________________
batch_normalization_60 (BatchNo (None, 25, 25, 32) 96 conv2d_60[0][0]
__________________________________________________________________________________________________
batch_normalization_62 (BatchNo (None, 25, 25, 32) 96 conv2d_62[0][0]
__________________________________________________________________________________________________
batch_normalization_65 (BatchNo (None, 25, 25, 64) 192 conv2d_65[0][0]
__________________________________________________________________________________________________
activation_60 (Activation) (None, 25, 25, 32) 0 batch_normalization_60[0][0]
__________________________________________________________________________________________________
activation_62 (Activation) (None, 25, 25, 32) 0 batch_normalization_62[0][0]
__________________________________________________________________________________________________
activation_65 (Activation) (None, 25, 25, 64) 0 batch_normalization_65[0][0]
__________________________________________________________________________________________________
block35_9_mixed (Concatenate) (None, 25, 25, 128) 0 activation_60[0][0]
activation_62[0][0]
activation_65[0][0]
__________________________________________________________________________________________________
block35_9_conv (Conv2D) (None, 25, 25, 320) 41280 block35_9_mixed[0][0]
__________________________________________________________________________________________________
block35_9 (Lambda) (None, 25, 25, 320) 0 block35_8_ac[0][0]
block35_9_conv[0][0]
__________________________________________________________________________________________________
block35_9_ac (Activation) (None, 25, 25, 320) 0 block35_9[0][0]
__________________________________________________________________________________________________
conv2d_69 (Conv2D) (None, 25, 25, 32) 10240 block35_9_ac[0][0]
__________________________________________________________________________________________________
batch_normalization_69 (BatchNo (None, 25, 25, 32) 96 conv2d_69[0][0]
__________________________________________________________________________________________________
activation_69 (Activation) (None, 25, 25, 32) 0 batch_normalization_69[0][0]
__________________________________________________________________________________________________
conv2d_67 (Conv2D) (None, 25, 25, 32) 10240 block35_9_ac[0][0]
__________________________________________________________________________________________________
conv2d_70 (Conv2D) (None, 25, 25, 48) 13824 activation_69[0][0]
__________________________________________________________________________________________________
batch_normalization_67 (BatchNo (None, 25, 25, 32) 96 conv2d_67[0][0]
__________________________________________________________________________________________________
batch_normalization_70 (BatchNo (None, 25, 25, 48) 144 conv2d_70[0][0]
__________________________________________________________________________________________________
activation_67 (Activation) (None, 25, 25, 32) 0 batch_normalization_67[0][0]
__________________________________________________________________________________________________
activation_70 (Activation) (None, 25, 25, 48) 0 batch_normalization_70[0][0]
__________________________________________________________________________________________________
conv2d_66 (Conv2D) (None, 25, 25, 32) 10240 block35_9_ac[0][0]
__________________________________________________________________________________________________
conv2d_68 (Conv2D) (None, 25, 25, 32) 9216 activation_67[0][0]
__________________________________________________________________________________________________
conv2d_71 (Conv2D) (None, 25, 25, 64) 27648 activation_70[0][0]
__________________________________________________________________________________________________
batch_normalization_66 (BatchNo (None, 25, 25, 32) 96 conv2d_66[0][0]
__________________________________________________________________________________________________
batch_normalization_68 (BatchNo (None, 25, 25, 32) 96 conv2d_68[0][0]
__________________________________________________________________________________________________
batch_normalization_71 (BatchNo (None, 25, 25, 64) 192 conv2d_71[0][0]
__________________________________________________________________________________________________
activation_66 (Activation) (None, 25, 25, 32) 0 batch_normalization_66[0][0]
__________________________________________________________________________________________________
activation_68 (Activation) (None, 25, 25, 32) 0 batch_normalization_68[0][0]
__________________________________________________________________________________________________
activation_71 (Activation) (None, 25, 25, 64) 0 batch_normalization_71[0][0]
__________________________________________________________________________________________________
block35_10_mixed (Concatenate) (None, 25, 25, 128) 0 activation_66[0][0]
activation_68[0][0]
activation_71[0][0]
__________________________________________________________________________________________________
block35_10_conv (Conv2D) (None, 25, 25, 320) 41280 block35_10_mixed[0][0]
__________________________________________________________________________________________________
block35_10 (Lambda) (None, 25, 25, 320) 0 block35_9_ac[0][0]
block35_10_conv[0][0]
__________________________________________________________________________________________________
block35_10_ac (Activation) (None, 25, 25, 320) 0 block35_10[0][0]
__________________________________________________________________________________________________
conv2d_73 (Conv2D) (None, 25, 25, 256) 81920 block35_10_ac[0][0]
__________________________________________________________________________________________________
batch_normalization_73 (BatchNo (None, 25, 25, 256) 768 conv2d_73[0][0]
__________________________________________________________________________________________________
activation_73 (Activation) (None, 25, 25, 256) 0 batch_normalization_73[0][0]
__________________________________________________________________________________________________
conv2d_74 (Conv2D) (None, 25, 25, 256) 589824 activation_73[0][0]
__________________________________________________________________________________________________
batch_normalization_74 (BatchNo (None, 25, 25, 256) 768 conv2d_74[0][0]
__________________________________________________________________________________________________
activation_74 (Activation) (None, 25, 25, 256) 0 batch_normalization_74[0][0]
__________________________________________________________________________________________________
conv2d_72 (Conv2D) (None, 12, 12, 384) 1105920 block35_10_ac[0][0]
__________________________________________________________________________________________________
conv2d_75 (Conv2D) (None, 12, 12, 384) 884736 activation_74[0][0]
__________________________________________________________________________________________________
batch_normalization_72 (BatchNo (None, 12, 12, 384) 1152 conv2d_72[0][0]
__________________________________________________________________________________________________
batch_normalization_75 (BatchNo (None, 12, 12, 384) 1152 conv2d_75[0][0]
__________________________________________________________________________________________________
activation_72 (Activation) (None, 12, 12, 384) 0 batch_normalization_72[0][0]
__________________________________________________________________________________________________
activation_75 (Activation) (None, 12, 12, 384) 0 batch_normalization_75[0][0]
__________________________________________________________________________________________________
max_pooling2d_2 (MaxPooling2D) (None, 12, 12, 320) 0 block35_10_ac[0][0]
__________________________________________________________________________________________________
mixed_6a (Concatenate) (None, 12, 12, 1088) 0 activation_72[0][0]
activation_75[0][0]
max_pooling2d_2[0][0]
__________________________________________________________________________________________________
conv2d_77 (Conv2D) (None, 12, 12, 128) 139264 mixed_6a[0][0]
__________________________________________________________________________________________________
batch_normalization_77 (BatchNo (None, 12, 12, 128) 384 conv2d_77[0][0]
__________________________________________________________________________________________________
activation_77 (Activation) (None, 12, 12, 128) 0 batch_normalization_77[0][0]
__________________________________________________________________________________________________
conv2d_78 (Conv2D) (None, 12, 12, 160) 143360 activation_77[0][0]
__________________________________________________________________________________________________
batch_normalization_78 (BatchNo (None, 12, 12, 160) 480 conv2d_78[0][0]
__________________________________________________________________________________________________
activation_78 (Activation) (None, 12, 12, 160) 0 batch_normalization_78[0][0]
__________________________________________________________________________________________________
conv2d_76 (Conv2D) (None, 12, 12, 192) 208896 mixed_6a[0][0]
__________________________________________________________________________________________________
conv2d_79 (Conv2D) (None, 12, 12, 192) 215040 activation_78[0][0]
__________________________________________________________________________________________________
batch_normalization_76 (BatchNo (None, 12, 12, 192) 576 conv2d_76[0][0]
__________________________________________________________________________________________________
batch_normalization_79 (BatchNo (None, 12, 12, 192) 576 conv2d_79[0][0]
__________________________________________________________________________________________________
activation_76 (Activation) (None, 12, 12, 192) 0 batch_normalization_76[0][0]
__________________________________________________________________________________________________
activation_79 (Activation) (None, 12, 12, 192) 0 batch_normalization_79[0][0]
__________________________________________________________________________________________________
block17_1_mixed (Concatenate) (None, 12, 12, 384) 0 activation_76[0][0]
activation_79[0][0]
__________________________________________________________________________________________________
block17_1_conv (Conv2D) (None, 12, 12, 1088) 418880 block17_1_mixed[0][0]
__________________________________________________________________________________________________
block17_1 (Lambda) (None, 12, 12, 1088) 0 mixed_6a[0][0]
block17_1_conv[0][0]
__________________________________________________________________________________________________
block17_1_ac (Activation) (None, 12, 12, 1088) 0 block17_1[0][0]
__________________________________________________________________________________________________
conv2d_81 (Conv2D) (None, 12, 12, 128) 139264 block17_1_ac[0][0]
__________________________________________________________________________________________________
batch_normalization_81 (BatchNo (None, 12, 12, 128) 384 conv2d_81[0][0]
__________________________________________________________________________________________________
activation_81 (Activation) (None, 12, 12, 128) 0 batch_normalization_81[0][0]
__________________________________________________________________________________________________
conv2d_82 (Conv2D) (None, 12, 12, 160) 143360 activation_81[0][0]
__________________________________________________________________________________________________
batch_normalization_82 (BatchNo (None, 12, 12, 160) 480 conv2d_82[0][0]
__________________________________________________________________________________________________
activation_82 (Activation) (None, 12, 12, 160) 0 batch_normalization_82[0][0]
__________________________________________________________________________________________________
conv2d_80 (Conv2D) (None, 12, 12, 192) 208896 block17_1_ac[0][0]
__________________________________________________________________________________________________
conv2d_83 (Conv2D) (None, 12, 12, 192) 215040 activation_82[0][0]
__________________________________________________________________________________________________
batch_normalization_80 (BatchNo (None, 12, 12, 192) 576 conv2d_80[0][0]
__________________________________________________________________________________________________
batch_normalization_83 (BatchNo (None, 12, 12, 192) 576 conv2d_83[0][0]
__________________________________________________________________________________________________
activation_80 (Activation) (None, 12, 12, 192) 0 batch_normalization_80[0][0]
__________________________________________________________________________________________________
activation_83 (Activation) (None, 12, 12, 192) 0 batch_normalization_83[0][0]
__________________________________________________________________________________________________
block17_2_mixed (Concatenate) (None, 12, 12, 384) 0 activation_80[0][0]
activation_83[0][0]
__________________________________________________________________________________________________
block17_2_conv (Conv2D) (None, 12, 12, 1088) 418880 block17_2_mixed[0][0]
__________________________________________________________________________________________________
block17_2 (Lambda) (None, 12, 12, 1088) 0 block17_1_ac[0][0]
block17_2_conv[0][0]
__________________________________________________________________________________________________
block17_2_ac (Activation) (None, 12, 12, 1088) 0 block17_2[0][0]
__________________________________________________________________________________________________
conv2d_85 (Conv2D) (None, 12, 12, 128) 139264 block17_2_ac[0][0]
__________________________________________________________________________________________________
batch_normalization_85 (BatchNo (None, 12, 12, 128) 384 conv2d_85[0][0]
__________________________________________________________________________________________________
activation_85 (Activation) (None, 12, 12, 128) 0 batch_normalization_85[0][0]
__________________________________________________________________________________________________
conv2d_86 (Conv2D) (None, 12, 12, 160) 143360 activation_85[0][0]
__________________________________________________________________________________________________
batch_normalization_86 (BatchNo (None, 12, 12, 160) 480 conv2d_86[0][0]
__________________________________________________________________________________________________
activation_86 (Activation) (None, 12, 12, 160) 0 batch_normalization_86[0][0]
__________________________________________________________________________________________________
conv2d_84 (Conv2D) (None, 12, 12, 192) 208896 block17_2_ac[0][0]
__________________________________________________________________________________________________
conv2d_87 (Conv2D) (None, 12, 12, 192) 215040 activation_86[0][0]
__________________________________________________________________________________________________
batch_normalization_84 (BatchNo (None, 12, 12, 192) 576 conv2d_84[0][0]
__________________________________________________________________________________________________
batch_normalization_87 (BatchNo (None, 12, 12, 192) 576 conv2d_87[0][0]
__________________________________________________________________________________________________
activation_84 (Activation) (None, 12, 12, 192) 0 batch_normalization_84[0][0]
__________________________________________________________________________________________________
activation_87 (Activation) (None, 12, 12, 192) 0 batch_normalization_87[0][0]
__________________________________________________________________________________________________
block17_3_mixed (Concatenate) (None, 12, 12, 384) 0 activation_84[0][0]
activation_87[0][0]
__________________________________________________________________________________________________
block17_3_conv (Conv2D) (None, 12, 12, 1088) 418880 block17_3_mixed[0][0]
__________________________________________________________________________________________________
block17_3 (Lambda) (None, 12, 12, 1088) 0 block17_2_ac[0][0]
block17_3_conv[0][0]
__________________________________________________________________________________________________
block17_3_ac (Activation) (None, 12, 12, 1088) 0 block17_3[0][0]
__________________________________________________________________________________________________
conv2d_89 (Conv2D) (None, 12, 12, 128) 139264 block17_3_ac[0][0]
__________________________________________________________________________________________________
batch_normalization_89 (BatchNo (None, 12, 12, 128) 384 conv2d_89[0][0]
__________________________________________________________________________________________________
activation_89 (Activation) (None, 12, 12, 128) 0 batch_normalization_89[0][0]
__________________________________________________________________________________________________
conv2d_90 (Conv2D) (None, 12, 12, 160) 143360 activation_89[0][0]
__________________________________________________________________________________________________
batch_normalization_90 (BatchNo (None, 12, 12, 160) 480 conv2d_90[0][0]
__________________________________________________________________________________________________
activation_90 (Activation) (None, 12, 12, 160) 0 batch_normalization_90[0][0]
__________________________________________________________________________________________________
conv2d_88 (Conv2D) (None, 12, 12, 192) 208896 block17_3_ac[0][0]
__________________________________________________________________________________________________
conv2d_91 (Conv2D) (None, 12, 12, 192) 215040 activation_90[0][0]
__________________________________________________________________________________________________
batch_normalization_88 (BatchNo (None, 12, 12, 192) 576 conv2d_88[0][0]
__________________________________________________________________________________________________
batch_normalization_91 (BatchNo (None, 12, 12, 192) 576 conv2d_91[0][0]
__________________________________________________________________________________________________
activation_88 (Activation) (None, 12, 12, 192) 0 batch_normalization_88[0][0]
__________________________________________________________________________________________________
activation_91 (Activation) (None, 12, 12, 192) 0 batch_normalization_91[0][0]
__________________________________________________________________________________________________
block17_4_mixed (Concatenate) (None, 12, 12, 384) 0 activation_88[0][0]
activation_91[0][0]
__________________________________________________________________________________________________
block17_4_conv (Conv2D) (None, 12, 12, 1088) 418880 block17_4_mixed[0][0]
__________________________________________________________________________________________________
block17_4 (Lambda) (None, 12, 12, 1088) 0 block17_3_ac[0][0]
block17_4_conv[0][0]
__________________________________________________________________________________________________
block17_4_ac (Activation) (None, 12, 12, 1088) 0 block17_4[0][0]
__________________________________________________________________________________________________
conv2d_93 (Conv2D) (None, 12, 12, 128) 139264 block17_4_ac[0][0]
__________________________________________________________________________________________________
batch_normalization_93 (BatchNo (None, 12, 12, 128) 384 conv2d_93[0][0]
__________________________________________________________________________________________________
activation_93 (Activation) (None, 12, 12, 128) 0 batch_normalization_93[0][0]
__________________________________________________________________________________________________
conv2d_94 (Conv2D) (None, 12, 12, 160) 143360 activation_93[0][0]
__________________________________________________________________________________________________
batch_normalization_94 (BatchNo (None, 12, 12, 160) 480 conv2d_94[0][0]
__________________________________________________________________________________________________
activation_94 (Activation) (None, 12, 12, 160) 0 batch_normalization_94[0][0]
__________________________________________________________________________________________________
conv2d_92 (Conv2D) (None, 12, 12, 192) 208896 block17_4_ac[0][0]
__________________________________________________________________________________________________
conv2d_95 (Conv2D) (None, 12, 12, 192) 215040 activation_94[0][0]
__________________________________________________________________________________________________
batch_normalization_92 (BatchNo (None, 12, 12, 192) 576 conv2d_92[0][0]
__________________________________________________________________________________________________
batch_normalization_95 (BatchNo (None, 12, 12, 192) 576 conv2d_95[0][0]
__________________________________________________________________________________________________
activation_92 (Activation) (None, 12, 12, 192) 0 batch_normalization_92[0][0]
__________________________________________________________________________________________________
activation_95 (Activation) (None, 12, 12, 192) 0 batch_normalization_95[0][0]
__________________________________________________________________________________________________
block17_5_mixed (Concatenate) (None, 12, 12, 384) 0 activation_92[0][0]
activation_95[0][0]
__________________________________________________________________________________________________
block17_5_conv (Conv2D) (None, 12, 12, 1088) 418880 block17_5_mixed[0][0]
__________________________________________________________________________________________________
block17_5 (Lambda) (None, 12, 12, 1088) 0 block17_4_ac[0][0]
block17_5_conv[0][0]
__________________________________________________________________________________________________
block17_5_ac (Activation) (None, 12, 12, 1088) 0 block17_5[0][0]
__________________________________________________________________________________________________
conv2d_97 (Conv2D) (None, 12, 12, 128) 139264 block17_5_ac[0][0]
__________________________________________________________________________________________________
batch_normalization_97 (BatchNo (None, 12, 12, 128) 384 conv2d_97[0][0]
__________________________________________________________________________________________________
activation_97 (Activation) (None, 12, 12, 128) 0 batch_normalization_97[0][0]
__________________________________________________________________________________________________
conv2d_98 (Conv2D) (None, 12, 12, 160) 143360 activation_97[0][0]
__________________________________________________________________________________________________
batch_normalization_98 (BatchNo (None, 12, 12, 160) 480 conv2d_98[0][0]
__________________________________________________________________________________________________
activation_98 (Activation) (None, 12, 12, 160) 0 batch_normalization_98[0][0]
__________________________________________________________________________________________________
conv2d_96 (Conv2D) (None, 12, 12, 192) 208896 block17_5_ac[0][0]
__________________________________________________________________________________________________
conv2d_99 (Conv2D) (None, 12, 12, 192) 215040 activation_98[0][0]
__________________________________________________________________________________________________
batch_normalization_96 (BatchNo (None, 12, 12, 192) 576 conv2d_96[0][0]
__________________________________________________________________________________________________
batch_normalization_99 (BatchNo (None, 12, 12, 192) 576 conv2d_99[0][0]
__________________________________________________________________________________________________
activation_96 (Activation) (None, 12, 12, 192) 0 batch_normalization_96[0][0]
__________________________________________________________________________________________________
activation_99 (Activation) (None, 12, 12, 192) 0 batch_normalization_99[0][0]
__________________________________________________________________________________________________
block17_6_mixed (Concatenate) (None, 12, 12, 384) 0 activation_96[0][0]
activation_99[0][0]
__________________________________________________________________________________________________
block17_6_conv (Conv2D) (None, 12, 12, 1088) 418880 block17_6_mixed[0][0]
__________________________________________________________________________________________________
block17_6 (Lambda) (None, 12, 12, 1088) 0 block17_5_ac[0][0]
block17_6_conv[0][0]
__________________________________________________________________________________________________
block17_6_ac (Activation) (None, 12, 12, 1088) 0 block17_6[0][0]
__________________________________________________________________________________________________
conv2d_101 (Conv2D) (None, 12, 12, 128) 139264 block17_6_ac[0][0]
__________________________________________________________________________________________________
batch_normalization_101 (BatchN (None, 12, 12, 128) 384 conv2d_101[0][0]
__________________________________________________________________________________________________
activation_101 (Activation) (None, 12, 12, 128) 0 batch_normalization_101[0][0]
__________________________________________________________________________________________________
conv2d_102 (Conv2D) (None, 12, 12, 160) 143360 activation_101[0][0]
__________________________________________________________________________________________________
batch_normalization_102 (BatchN (None, 12, 12, 160) 480 conv2d_102[0][0]
__________________________________________________________________________________________________
activation_102 (Activation) (None, 12, 12, 160) 0 batch_normalization_102[0][0]
__________________________________________________________________________________________________
conv2d_100 (Conv2D) (None, 12, 12, 192) 208896 block17_6_ac[0][0]
__________________________________________________________________________________________________
conv2d_103 (Conv2D) (None, 12, 12, 192) 215040 activation_102[0][0]
__________________________________________________________________________________________________
batch_normalization_100 (BatchN (None, 12, 12, 192) 576 conv2d_100[0][0]
__________________________________________________________________________________________________
batch_normalization_103 (BatchN (None, 12, 12, 192) 576 conv2d_103[0][0]
__________________________________________________________________________________________________
activation_100 (Activation) (None, 12, 12, 192) 0 batch_normalization_100[0][0]
__________________________________________________________________________________________________
activation_103 (Activation) (None, 12, 12, 192) 0 batch_normalization_103[0][0]
__________________________________________________________________________________________________
block17_7_mixed (Concatenate) (None, 12, 12, 384) 0 activation_100[0][0]
activation_103[0][0]
__________________________________________________________________________________________________
block17_7_conv (Conv2D) (None, 12, 12, 1088) 418880 block17_7_mixed[0][0]
__________________________________________________________________________________________________
block17_7 (Lambda) (None, 12, 12, 1088) 0 block17_6_ac[0][0]
block17_7_conv[0][0]
__________________________________________________________________________________________________
block17_7_ac (Activation) (None, 12, 12, 1088) 0 block17_7[0][0]
__________________________________________________________________________________________________
conv2d_105 (Conv2D) (None, 12, 12, 128) 139264 block17_7_ac[0][0]
__________________________________________________________________________________________________
batch_normalization_105 (BatchN (None, 12, 12, 128) 384 conv2d_105[0][0]
__________________________________________________________________________________________________
activation_105 (Activation) (None, 12, 12, 128) 0 batch_normalization_105[0][0]
__________________________________________________________________________________________________
conv2d_106 (Conv2D) (None, 12, 12, 160) 143360 activation_105[0][0]
__________________________________________________________________________________________________
batch_normalization_106 (BatchN (None, 12, 12, 160) 480 conv2d_106[0][0]
__________________________________________________________________________________________________
activation_106 (Activation) (None, 12, 12, 160) 0 batch_normalization_106[0][0]
__________________________________________________________________________________________________
conv2d_104 (Conv2D) (None, 12, 12, 192) 208896 block17_7_ac[0][0]
__________________________________________________________________________________________________
conv2d_107 (Conv2D) (None, 12, 12, 192) 215040 activation_106[0][0]
__________________________________________________________________________________________________
batch_normalization_104 (BatchN (None, 12, 12, 192) 576 conv2d_104[0][0]
__________________________________________________________________________________________________
batch_normalization_107 (BatchN (None, 12, 12, 192) 576 conv2d_107[0][0]
__________________________________________________________________________________________________
activation_104 (Activation) (None, 12, 12, 192) 0 batch_normalization_104[0][0]
__________________________________________________________________________________________________
activation_107 (Activation) (None, 12, 12, 192) 0 batch_normalization_107[0][0]
__________________________________________________________________________________________________
block17_8_mixed (Concatenate) (None, 12, 12, 384) 0 activation_104[0][0]
activation_107[0][0]
__________________________________________________________________________________________________
block17_8_conv (Conv2D) (None, 12, 12, 1088) 418880 block17_8_mixed[0][0]
__________________________________________________________________________________________________
block17_8 (Lambda) (None, 12, 12, 1088) 0 block17_7_ac[0][0]
block17_8_conv[0][0]
__________________________________________________________________________________________________
block17_8_ac (Activation) (None, 12, 12, 1088) 0 block17_8[0][0]
__________________________________________________________________________________________________
conv2d_109 (Conv2D) (None, 12, 12, 128) 139264 block17_8_ac[0][0]
__________________________________________________________________________________________________
batch_normalization_109 (BatchN (None, 12, 12, 128) 384 conv2d_109[0][0]
__________________________________________________________________________________________________
activation_109 (Activation) (None, 12, 12, 128) 0 batch_normalization_109[0][0]
__________________________________________________________________________________________________
conv2d_110 (Conv2D) (None, 12, 12, 160) 143360 activation_109[0][0]
__________________________________________________________________________________________________
batch_normalization_110 (BatchN (None, 12, 12, 160) 480 conv2d_110[0][0]
__________________________________________________________________________________________________
activation_110 (Activation) (None, 12, 12, 160) 0 batch_normalization_110[0][0]
__________________________________________________________________________________________________
conv2d_108 (Conv2D) (None, 12, 12, 192) 208896 block17_8_ac[0][0]
__________________________________________________________________________________________________
conv2d_111 (Conv2D) (None, 12, 12, 192) 215040 activation_110[0][0]
__________________________________________________________________________________________________
batch_normalization_108 (BatchN (None, 12, 12, 192) 576 conv2d_108[0][0]
__________________________________________________________________________________________________
batch_normalization_111 (BatchN (None, 12, 12, 192) 576 conv2d_111[0][0]
__________________________________________________________________________________________________
activation_108 (Activation) (None, 12, 12, 192) 0 batch_normalization_108[0][0]
__________________________________________________________________________________________________
activation_111 (Activation) (None, 12, 12, 192) 0 batch_normalization_111[0][0]
__________________________________________________________________________________________________
block17_9_mixed (Concatenate) (None, 12, 12, 384) 0 activation_108[0][0]
activation_111[0][0]
__________________________________________________________________________________________________
block17_9_conv (Conv2D) (None, 12, 12, 1088) 418880 block17_9_mixed[0][0]
__________________________________________________________________________________________________
block17_9 (Lambda) (None, 12, 12, 1088) 0 block17_8_ac[0][0]
block17_9_conv[0][0]
__________________________________________________________________________________________________
block17_9_ac (Activation) (None, 12, 12, 1088) 0 block17_9[0][0]
__________________________________________________________________________________________________
conv2d_113 (Conv2D) (None, 12, 12, 128) 139264 block17_9_ac[0][0]
__________________________________________________________________________________________________
batch_normalization_113 (BatchN (None, 12, 12, 128) 384 conv2d_113[0][0]
__________________________________________________________________________________________________
activation_113 (Activation) (None, 12, 12, 128) 0 batch_normalization_113[0][0]
__________________________________________________________________________________________________
conv2d_114 (Conv2D) (None, 12, 12, 160) 143360 activation_113[0][0]
__________________________________________________________________________________________________
batch_normalization_114 (BatchN (None, 12, 12, 160) 480 conv2d_114[0][0]
__________________________________________________________________________________________________
activation_114 (Activation) (None, 12, 12, 160) 0 batch_normalization_114[0][0]
__________________________________________________________________________________________________
conv2d_112 (Conv2D) (None, 12, 12, 192) 208896 block17_9_ac[0][0]
__________________________________________________________________________________________________
conv2d_115 (Conv2D) (None, 12, 12, 192) 215040 activation_114[0][0]
__________________________________________________________________________________________________
batch_normalization_112 (BatchN (None, 12, 12, 192) 576 conv2d_112[0][0]
__________________________________________________________________________________________________
batch_normalization_115 (BatchN (None, 12, 12, 192) 576 conv2d_115[0][0]
__________________________________________________________________________________________________
activation_112 (Activation) (None, 12, 12, 192) 0 batch_normalization_112[0][0]
__________________________________________________________________________________________________
activation_115 (Activation) (None, 12, 12, 192) 0 batch_normalization_115[0][0]
__________________________________________________________________________________________________
block17_10_mixed (Concatenate) (None, 12, 12, 384) 0 activation_112[0][0]
activation_115[0][0]
__________________________________________________________________________________________________
block17_10_conv (Conv2D) (None, 12, 12, 1088) 418880 block17_10_mixed[0][0]
__________________________________________________________________________________________________
block17_10 (Lambda) (None, 12, 12, 1088) 0 block17_9_ac[0][0]
block17_10_conv[0][0]
__________________________________________________________________________________________________
block17_10_ac (Activation) (None, 12, 12, 1088) 0 block17_10[0][0]
__________________________________________________________________________________________________
conv2d_117 (Conv2D) (None, 12, 12, 128) 139264 block17_10_ac[0][0]
__________________________________________________________________________________________________
batch_normalization_117 (BatchN (None, 12, 12, 128) 384 conv2d_117[0][0]
__________________________________________________________________________________________________
activation_117 (Activation) (None, 12, 12, 128) 0 batch_normalization_117[0][0]
__________________________________________________________________________________________________
conv2d_118 (Conv2D) (None, 12, 12, 160) 143360 activation_117[0][0]
__________________________________________________________________________________________________
batch_normalization_118 (BatchN (None, 12, 12, 160) 480 conv2d_118[0][0]
__________________________________________________________________________________________________
activation_118 (Activation) (None, 12, 12, 160) 0 batch_normalization_118[0][0]
__________________________________________________________________________________________________
conv2d_116 (Conv2D) (None, 12, 12, 192) 208896 block17_10_ac[0][0]
__________________________________________________________________________________________________
conv2d_119 (Conv2D) (None, 12, 12, 192) 215040 activation_118[0][0]
__________________________________________________________________________________________________
batch_normalization_116 (BatchN (None, 12, 12, 192) 576 conv2d_116[0][0]
__________________________________________________________________________________________________
batch_normalization_119 (BatchN (None, 12, 12, 192) 576 conv2d_119[0][0]
__________________________________________________________________________________________________
activation_116 (Activation) (None, 12, 12, 192) 0 batch_normalization_116[0][0]
__________________________________________________________________________________________________
activation_119 (Activation) (None, 12, 12, 192) 0 batch_normalization_119[0][0]
__________________________________________________________________________________________________
block17_11_mixed (Concatenate) (None, 12, 12, 384) 0 activation_116[0][0]
activation_119[0][0]
__________________________________________________________________________________________________
block17_11_conv (Conv2D) (None, 12, 12, 1088) 418880 block17_11_mixed[0][0]
__________________________________________________________________________________________________
block17_11 (Lambda) (None, 12, 12, 1088) 0 block17_10_ac[0][0]
block17_11_conv[0][0]
__________________________________________________________________________________________________
block17_11_ac (Activation) (None, 12, 12, 1088) 0 block17_11[0][0]
__________________________________________________________________________________________________
conv2d_121 (Conv2D) (None, 12, 12, 128) 139264 block17_11_ac[0][0]
__________________________________________________________________________________________________
batch_normalization_121 (BatchN (None, 12, 12, 128) 384 conv2d_121[0][0]
__________________________________________________________________________________________________
activation_121 (Activation) (None, 12, 12, 128) 0 batch_normalization_121[0][0]
__________________________________________________________________________________________________
conv2d_122 (Conv2D) (None, 12, 12, 160) 143360 activation_121[0][0]
__________________________________________________________________________________________________
batch_normalization_122 (BatchN (None, 12, 12, 160) 480 conv2d_122[0][0]
__________________________________________________________________________________________________
activation_122 (Activation) (None, 12, 12, 160) 0 batch_normalization_122[0][0]
__________________________________________________________________________________________________
conv2d_120 (Conv2D) (None, 12, 12, 192) 208896 block17_11_ac[0][0]
__________________________________________________________________________________________________
conv2d_123 (Conv2D) (None, 12, 12, 192) 215040 activation_122[0][0]
__________________________________________________________________________________________________
batch_normalization_120 (BatchN (None, 12, 12, 192) 576 conv2d_120[0][0]
__________________________________________________________________________________________________
batch_normalization_123 (BatchN (None, 12, 12, 192) 576 conv2d_123[0][0]
__________________________________________________________________________________________________
activation_120 (Activation) (None, 12, 12, 192) 0 batch_normalization_120[0][0]
__________________________________________________________________________________________________
activation_123 (Activation) (None, 12, 12, 192) 0 batch_normalization_123[0][0]
__________________________________________________________________________________________________
block17_12_mixed (Concatenate) (None, 12, 12, 384) 0 activation_120[0][0]
activation_123[0][0]
__________________________________________________________________________________________________
block17_12_conv (Conv2D) (None, 12, 12, 1088) 418880 block17_12_mixed[0][0]
__________________________________________________________________________________________________
block17_12 (Lambda) (None, 12, 12, 1088) 0 block17_11_ac[0][0]
block17_12_conv[0][0]
__________________________________________________________________________________________________
block17_12_ac (Activation) (None, 12, 12, 1088) 0 block17_12[0][0]
__________________________________________________________________________________________________
conv2d_125 (Conv2D) (None, 12, 12, 128) 139264 block17_12_ac[0][0]
__________________________________________________________________________________________________
batch_normalization_125 (BatchN (None, 12, 12, 128) 384 conv2d_125[0][0]
__________________________________________________________________________________________________
activation_125 (Activation) (None, 12, 12, 128) 0 batch_normalization_125[0][0]
__________________________________________________________________________________________________
conv2d_126 (Conv2D) (None, 12, 12, 160) 143360 activation_125[0][0]
__________________________________________________________________________________________________
batch_normalization_126 (BatchN (None, 12, 12, 160) 480 conv2d_126[0][0]
__________________________________________________________________________________________________
activation_126 (Activation) (None, 12, 12, 160) 0 batch_normalization_126[0][0]
__________________________________________________________________________________________________
conv2d_124 (Conv2D) (None, 12, 12, 192) 208896 block17_12_ac[0][0]
__________________________________________________________________________________________________
conv2d_127 (Conv2D) (None, 12, 12, 192) 215040 activation_126[0][0]
__________________________________________________________________________________________________
batch_normalization_124 (BatchN (None, 12, 12, 192) 576 conv2d_124[0][0]
__________________________________________________________________________________________________
batch_normalization_127 (BatchN (None, 12, 12, 192) 576 conv2d_127[0][0]
__________________________________________________________________________________________________
activation_124 (Activation) (None, 12, 12, 192) 0 batch_normalization_124[0][0]
__________________________________________________________________________________________________
activation_127 (Activation) (None, 12, 12, 192) 0 batch_normalization_127[0][0]
__________________________________________________________________________________________________
block17_13_mixed (Concatenate) (None, 12, 12, 384) 0 activation_124[0][0]
activation_127[0][0]
__________________________________________________________________________________________________
block17_13_conv (Conv2D) (None, 12, 12, 1088) 418880 block17_13_mixed[0][0]
__________________________________________________________________________________________________
block17_13 (Lambda) (None, 12, 12, 1088) 0 block17_12_ac[0][0]
block17_13_conv[0][0]
__________________________________________________________________________________________________
block17_13_ac (Activation) (None, 12, 12, 1088) 0 block17_13[0][0]
__________________________________________________________________________________________________
conv2d_129 (Conv2D) (None, 12, 12, 128) 139264 block17_13_ac[0][0]
__________________________________________________________________________________________________
batch_normalization_129 (BatchN (None, 12, 12, 128) 384 conv2d_129[0][0]
__________________________________________________________________________________________________
activation_129 (Activation) (None, 12, 12, 128) 0 batch_normalization_129[0][0]
__________________________________________________________________________________________________
conv2d_130 (Conv2D) (None, 12, 12, 160) 143360 activation_129[0][0]
__________________________________________________________________________________________________
batch_normalization_130 (BatchN (None, 12, 12, 160) 480 conv2d_130[0][0]
__________________________________________________________________________________________________
activation_130 (Activation) (None, 12, 12, 160) 0 batch_normalization_130[0][0]
__________________________________________________________________________________________________
conv2d_128 (Conv2D) (None, 12, 12, 192) 208896 block17_13_ac[0][0]
__________________________________________________________________________________________________
conv2d_131 (Conv2D) (None, 12, 12, 192) 215040 activation_130[0][0]
__________________________________________________________________________________________________
batch_normalization_128 (BatchN (None, 12, 12, 192) 576 conv2d_128[0][0]
__________________________________________________________________________________________________
batch_normalization_131 (BatchN (None, 12, 12, 192) 576 conv2d_131[0][0]
__________________________________________________________________________________________________
activation_128 (Activation) (None, 12, 12, 192) 0 batch_normalization_128[0][0]
__________________________________________________________________________________________________
activation_131 (Activation) (None, 12, 12, 192) 0 batch_normalization_131[0][0]
__________________________________________________________________________________________________
block17_14_mixed (Concatenate) (None, 12, 12, 384) 0 activation_128[0][0]
activation_131[0][0]
__________________________________________________________________________________________________
block17_14_conv (Conv2D) (None, 12, 12, 1088) 418880 block17_14_mixed[0][0]
__________________________________________________________________________________________________
block17_14 (Lambda) (None, 12, 12, 1088) 0 block17_13_ac[0][0]
block17_14_conv[0][0]
__________________________________________________________________________________________________
block17_14_ac (Activation) (None, 12, 12, 1088) 0 block17_14[0][0]
__________________________________________________________________________________________________
conv2d_133 (Conv2D) (None, 12, 12, 128) 139264 block17_14_ac[0][0]
__________________________________________________________________________________________________
batch_normalization_133 (BatchN (None, 12, 12, 128) 384 conv2d_133[0][0]
__________________________________________________________________________________________________
activation_133 (Activation) (None, 12, 12, 128) 0 batch_normalization_133[0][0]
__________________________________________________________________________________________________
conv2d_134 (Conv2D) (None, 12, 12, 160) 143360 activation_133[0][0]
__________________________________________________________________________________________________
batch_normalization_134 (BatchN (None, 12, 12, 160) 480 conv2d_134[0][0]
__________________________________________________________________________________________________
activation_134 (Activation) (None, 12, 12, 160) 0 batch_normalization_134[0][0]
__________________________________________________________________________________________________
conv2d_132 (Conv2D) (None, 12, 12, 192) 208896 block17_14_ac[0][0]
__________________________________________________________________________________________________
conv2d_135 (Conv2D) (None, 12, 12, 192) 215040 activation_134[0][0]
__________________________________________________________________________________________________
batch_normalization_132 (BatchN (None, 12, 12, 192) 576 conv2d_132[0][0]
__________________________________________________________________________________________________
batch_normalization_135 (BatchN (None, 12, 12, 192) 576 conv2d_135[0][0]
__________________________________________________________________________________________________
activation_132 (Activation) (None, 12, 12, 192) 0 batch_normalization_132[0][0]
__________________________________________________________________________________________________
activation_135 (Activation) (None, 12, 12, 192) 0 batch_normalization_135[0][0]
__________________________________________________________________________________________________
block17_15_mixed (Concatenate) (None, 12, 12, 384) 0 activation_132[0][0]
activation_135[0][0]
__________________________________________________________________________________________________
block17_15_conv (Conv2D) (None, 12, 12, 1088) 418880 block17_15_mixed[0][0]
__________________________________________________________________________________________________
block17_15 (Lambda) (None, 12, 12, 1088) 0 block17_14_ac[0][0]
block17_15_conv[0][0]
__________________________________________________________________________________________________
block17_15_ac (Activation) (None, 12, 12, 1088) 0 block17_15[0][0]
__________________________________________________________________________________________________
conv2d_137 (Conv2D) (None, 12, 12, 128) 139264 block17_15_ac[0][0]
__________________________________________________________________________________________________
batch_normalization_137 (BatchN (None, 12, 12, 128) 384 conv2d_137[0][0]
__________________________________________________________________________________________________
activation_137 (Activation) (None, 12, 12, 128) 0 batch_normalization_137[0][0]
__________________________________________________________________________________________________
conv2d_138 (Conv2D) (None, 12, 12, 160) 143360 activation_137[0][0]
__________________________________________________________________________________________________
batch_normalization_138 (BatchN (None, 12, 12, 160) 480 conv2d_138[0][0]
__________________________________________________________________________________________________
activation_138 (Activation) (None, 12, 12, 160) 0 batch_normalization_138[0][0]
__________________________________________________________________________________________________
conv2d_136 (Conv2D) (None, 12, 12, 192) 208896 block17_15_ac[0][0]
__________________________________________________________________________________________________
conv2d_139 (Conv2D) (None, 12, 12, 192) 215040 activation_138[0][0]
__________________________________________________________________________________________________
batch_normalization_136 (BatchN (None, 12, 12, 192) 576 conv2d_136[0][0]
__________________________________________________________________________________________________
batch_normalization_139 (BatchN (None, 12, 12, 192) 576 conv2d_139[0][0]
__________________________________________________________________________________________________
activation_136 (Activation) (None, 12, 12, 192) 0 batch_normalization_136[0][0]
__________________________________________________________________________________________________
activation_139 (Activation) (None, 12, 12, 192) 0 batch_normalization_139[0][0]
__________________________________________________________________________________________________
block17_16_mixed (Concatenate) (None, 12, 12, 384) 0 activation_136[0][0]
activation_139[0][0]
__________________________________________________________________________________________________
block17_16_conv (Conv2D) (None, 12, 12, 1088) 418880 block17_16_mixed[0][0]
__________________________________________________________________________________________________
block17_16 (Lambda) (None, 12, 12, 1088) 0 block17_15_ac[0][0]
block17_16_conv[0][0]
__________________________________________________________________________________________________
block17_16_ac (Activation) (None, 12, 12, 1088) 0 block17_16[0][0]
__________________________________________________________________________________________________
conv2d_141 (Conv2D) (None, 12, 12, 128) 139264 block17_16_ac[0][0]
__________________________________________________________________________________________________
batch_normalization_141 (BatchN (None, 12, 12, 128) 384 conv2d_141[0][0]
__________________________________________________________________________________________________
activation_141 (Activation) (None, 12, 12, 128) 0 batch_normalization_141[0][0]
__________________________________________________________________________________________________
conv2d_142 (Conv2D) (None, 12, 12, 160) 143360 activation_141[0][0]
__________________________________________________________________________________________________
batch_normalization_142 (BatchN (None, 12, 12, 160) 480 conv2d_142[0][0]
__________________________________________________________________________________________________
activation_142 (Activation) (None, 12, 12, 160) 0 batch_normalization_142[0][0]
__________________________________________________________________________________________________
conv2d_140 (Conv2D) (None, 12, 12, 192) 208896 block17_16_ac[0][0]
__________________________________________________________________________________________________
conv2d_143 (Conv2D) (None, 12, 12, 192) 215040 activation_142[0][0]
__________________________________________________________________________________________________
batch_normalization_140 (BatchN (None, 12, 12, 192) 576 conv2d_140[0][0]
__________________________________________________________________________________________________
batch_normalization_143 (BatchN (None, 12, 12, 192) 576 conv2d_143[0][0]
__________________________________________________________________________________________________
activation_140 (Activation) (None, 12, 12, 192) 0 batch_normalization_140[0][0]
__________________________________________________________________________________________________
activation_143 (Activation) (None, 12, 12, 192) 0 batch_normalization_143[0][0]
__________________________________________________________________________________________________
block17_17_mixed (Concatenate) (None, 12, 12, 384) 0 activation_140[0][0]
activation_143[0][0]
__________________________________________________________________________________________________
block17_17_conv (Conv2D) (None, 12, 12, 1088) 418880 block17_17_mixed[0][0]
__________________________________________________________________________________________________
block17_17 (Lambda) (None, 12, 12, 1088) 0 block17_16_ac[0][0]
block17_17_conv[0][0]
__________________________________________________________________________________________________
block17_17_ac (Activation) (None, 12, 12, 1088) 0 block17_17[0][0]
__________________________________________________________________________________________________
conv2d_145 (Conv2D) (None, 12, 12, 128) 139264 block17_17_ac[0][0]
__________________________________________________________________________________________________
batch_normalization_145 (BatchN (None, 12, 12, 128) 384 conv2d_145[0][0]
__________________________________________________________________________________________________
activation_145 (Activation) (None, 12, 12, 128) 0 batch_normalization_145[0][0]
__________________________________________________________________________________________________
conv2d_146 (Conv2D) (None, 12, 12, 160) 143360 activation_145[0][0]
__________________________________________________________________________________________________
batch_normalization_146 (BatchN (None, 12, 12, 160) 480 conv2d_146[0][0]
__________________________________________________________________________________________________
activation_146 (Activation) (None, 12, 12, 160) 0 batch_normalization_146[0][0]
__________________________________________________________________________________________________
conv2d_144 (Conv2D) (None, 12, 12, 192) 208896 block17_17_ac[0][0]
__________________________________________________________________________________________________
conv2d_147 (Conv2D) (None, 12, 12, 192) 215040 activation_146[0][0]
__________________________________________________________________________________________________
batch_normalization_144 (BatchN (None, 12, 12, 192) 576 conv2d_144[0][0]
__________________________________________________________________________________________________
batch_normalization_147 (BatchN (None, 12, 12, 192) 576 conv2d_147[0][0]
__________________________________________________________________________________________________
activation_144 (Activation) (None, 12, 12, 192) 0 batch_normalization_144[0][0]
__________________________________________________________________________________________________
activation_147 (Activation) (None, 12, 12, 192) 0 batch_normalization_147[0][0]
__________________________________________________________________________________________________
block17_18_mixed (Concatenate) (None, 12, 12, 384) 0 activation_144[0][0]
activation_147[0][0]
__________________________________________________________________________________________________
block17_18_conv (Conv2D) (None, 12, 12, 1088) 418880 block17_18_mixed[0][0]
__________________________________________________________________________________________________
block17_18 (Lambda) (None, 12, 12, 1088) 0 block17_17_ac[0][0]
block17_18_conv[0][0]
__________________________________________________________________________________________________
block17_18_ac (Activation) (None, 12, 12, 1088) 0 block17_18[0][0]
__________________________________________________________________________________________________
conv2d_149 (Conv2D) (None, 12, 12, 128) 139264 block17_18_ac[0][0]
__________________________________________________________________________________________________
batch_normalization_149 (BatchN (None, 12, 12, 128) 384 conv2d_149[0][0]
__________________________________________________________________________________________________
activation_149 (Activation) (None, 12, 12, 128) 0 batch_normalization_149[0][0]
__________________________________________________________________________________________________
conv2d_150 (Conv2D) (None, 12, 12, 160) 143360 activation_149[0][0]
__________________________________________________________________________________________________
batch_normalization_150 (BatchN (None, 12, 12, 160) 480 conv2d_150[0][0]
__________________________________________________________________________________________________
activation_150 (Activation) (None, 12, 12, 160) 0 batch_normalization_150[0][0]
__________________________________________________________________________________________________
conv2d_148 (Conv2D) (None, 12, 12, 192) 208896 block17_18_ac[0][0]
__________________________________________________________________________________________________
conv2d_151 (Conv2D) (None, 12, 12, 192) 215040 activation_150[0][0]
__________________________________________________________________________________________________
batch_normalization_148 (BatchN (None, 12, 12, 192) 576 conv2d_148[0][0]
__________________________________________________________________________________________________
batch_normalization_151 (BatchN (None, 12, 12, 192) 576 conv2d_151[0][0]
__________________________________________________________________________________________________
activation_148 (Activation) (None, 12, 12, 192) 0 batch_normalization_148[0][0]
__________________________________________________________________________________________________
activation_151 (Activation) (None, 12, 12, 192) 0 batch_normalization_151[0][0]
__________________________________________________________________________________________________
block17_19_mixed (Concatenate) (None, 12, 12, 384) 0 activation_148[0][0]
activation_151[0][0]
__________________________________________________________________________________________________
block17_19_conv (Conv2D) (None, 12, 12, 1088) 418880 block17_19_mixed[0][0]
__________________________________________________________________________________________________
block17_19 (Lambda) (None, 12, 12, 1088) 0 block17_18_ac[0][0]
block17_19_conv[0][0]
__________________________________________________________________________________________________
block17_19_ac (Activation) (None, 12, 12, 1088) 0 block17_19[0][0]
__________________________________________________________________________________________________
conv2d_153 (Conv2D) (None, 12, 12, 128) 139264 block17_19_ac[0][0]
__________________________________________________________________________________________________
batch_normalization_153 (BatchN (None, 12, 12, 128) 384 conv2d_153[0][0]
__________________________________________________________________________________________________
activation_153 (Activation) (None, 12, 12, 128) 0 batch_normalization_153[0][0]
__________________________________________________________________________________________________
conv2d_154 (Conv2D) (None, 12, 12, 160) 143360 activation_153[0][0]
__________________________________________________________________________________________________
batch_normalization_154 (BatchN (None, 12, 12, 160) 480 conv2d_154[0][0]
__________________________________________________________________________________________________
activation_154 (Activation) (None, 12, 12, 160) 0 batch_normalization_154[0][0]
__________________________________________________________________________________________________
conv2d_152 (Conv2D) (None, 12, 12, 192) 208896 block17_19_ac[0][0]
__________________________________________________________________________________________________
conv2d_155 (Conv2D) (None, 12, 12, 192) 215040 activation_154[0][0]
__________________________________________________________________________________________________
batch_normalization_152 (BatchN (None, 12, 12, 192) 576 conv2d_152[0][0]
__________________________________________________________________________________________________
batch_normalization_155 (BatchN (None, 12, 12, 192) 576 conv2d_155[0][0]
__________________________________________________________________________________________________
activation_152 (Activation) (None, 12, 12, 192) 0 batch_normalization_152[0][0]
__________________________________________________________________________________________________
activation_155 (Activation) (None, 12, 12, 192) 0 batch_normalization_155[0][0]
__________________________________________________________________________________________________
block17_20_mixed (Concatenate) (None, 12, 12, 384) 0 activation_152[0][0]
activation_155[0][0]
__________________________________________________________________________________________________
block17_20_conv (Conv2D) (None, 12, 12, 1088) 418880 block17_20_mixed[0][0]
__________________________________________________________________________________________________
block17_20 (Lambda) (None, 12, 12, 1088) 0 block17_19_ac[0][0]
block17_20_conv[0][0]
__________________________________________________________________________________________________
block17_20_ac (Activation) (None, 12, 12, 1088) 0 block17_20[0][0]
__________________________________________________________________________________________________
conv2d_160 (Conv2D) (None, 12, 12, 256) 278528 block17_20_ac[0][0]
__________________________________________________________________________________________________
batch_normalization_160 (BatchN (None, 12, 12, 256) 768 conv2d_160[0][0]
__________________________________________________________________________________________________
activation_160 (Activation) (None, 12, 12, 256) 0 batch_normalization_160[0][0]
__________________________________________________________________________________________________
conv2d_156 (Conv2D) (None, 12, 12, 256) 278528 block17_20_ac[0][0]
__________________________________________________________________________________________________
conv2d_158 (Conv2D) (None, 12, 12, 256) 278528 block17_20_ac[0][0]
__________________________________________________________________________________________________
conv2d_161 (Conv2D) (None, 12, 12, 288) 663552 activation_160[0][0]
__________________________________________________________________________________________________
batch_normalization_156 (BatchN (None, 12, 12, 256) 768 conv2d_156[0][0]
__________________________________________________________________________________________________
batch_normalization_158 (BatchN (None, 12, 12, 256) 768 conv2d_158[0][0]
__________________________________________________________________________________________________
batch_normalization_161 (BatchN (None, 12, 12, 288) 864 conv2d_161[0][0]
__________________________________________________________________________________________________
activation_156 (Activation) (None, 12, 12, 256) 0 batch_normalization_156[0][0]
__________________________________________________________________________________________________
activation_158 (Activation) (None, 12, 12, 256) 0 batch_normalization_158[0][0]
__________________________________________________________________________________________________
activation_161 (Activation) (None, 12, 12, 288) 0 batch_normalization_161[0][0]
__________________________________________________________________________________________________
conv2d_157 (Conv2D) (None, 5, 5, 384) 884736 activation_156[0][0]
__________________________________________________________________________________________________
conv2d_159 (Conv2D) (None, 5, 5, 288) 663552 activation_158[0][0]
__________________________________________________________________________________________________
conv2d_162 (Conv2D) (None, 5, 5, 320) 829440 activation_161[0][0]
__________________________________________________________________________________________________
batch_normalization_157 (BatchN (None, 5, 5, 384) 1152 conv2d_157[0][0]
__________________________________________________________________________________________________
batch_normalization_159 (BatchN (None, 5, 5, 288) 864 conv2d_159[0][0]
__________________________________________________________________________________________________
batch_normalization_162 (BatchN (None, 5, 5, 320) 960 conv2d_162[0][0]
__________________________________________________________________________________________________
activation_157 (Activation) (None, 5, 5, 384) 0 batch_normalization_157[0][0]
__________________________________________________________________________________________________
activation_159 (Activation) (None, 5, 5, 288) 0 batch_normalization_159[0][0]
__________________________________________________________________________________________________
activation_162 (Activation) (None, 5, 5, 320) 0 batch_normalization_162[0][0]
__________________________________________________________________________________________________
max_pooling2d_3 (MaxPooling2D) (None, 5, 5, 1088) 0 block17_20_ac[0][0]
__________________________________________________________________________________________________
mixed_7a (Concatenate) (None, 5, 5, 2080) 0 activation_157[0][0]
activation_159[0][0]
activation_162[0][0]
max_pooling2d_3[0][0]
__________________________________________________________________________________________________
conv2d_164 (Conv2D) (None, 5, 5, 192) 399360 mixed_7a[0][0]
__________________________________________________________________________________________________
batch_normalization_164 (BatchN (None, 5, 5, 192) 576 conv2d_164[0][0]
__________________________________________________________________________________________________
activation_164 (Activation) (None, 5, 5, 192) 0 batch_normalization_164[0][0]
__________________________________________________________________________________________________
conv2d_165 (Conv2D) (None, 5, 5, 224) 129024 activation_164[0][0]
__________________________________________________________________________________________________
batch_normalization_165 (BatchN (None, 5, 5, 224) 672 conv2d_165[0][0]
__________________________________________________________________________________________________
activation_165 (Activation) (None, 5, 5, 224) 0 batch_normalization_165[0][0]
__________________________________________________________________________________________________
conv2d_163 (Conv2D) (None, 5, 5, 192) 399360 mixed_7a[0][0]
__________________________________________________________________________________________________
conv2d_166 (Conv2D) (None, 5, 5, 256) 172032 activation_165[0][0]
__________________________________________________________________________________________________
batch_normalization_163 (BatchN (None, 5, 5, 192) 576 conv2d_163[0][0]
__________________________________________________________________________________________________
batch_normalization_166 (BatchN (None, 5, 5, 256) 768 conv2d_166[0][0]
__________________________________________________________________________________________________
activation_163 (Activation) (None, 5, 5, 192) 0 batch_normalization_163[0][0]
__________________________________________________________________________________________________
activation_166 (Activation) (None, 5, 5, 256) 0 batch_normalization_166[0][0]
__________________________________________________________________________________________________
block8_1_mixed (Concatenate) (None, 5, 5, 448) 0 activation_163[0][0]
activation_166[0][0]
__________________________________________________________________________________________________
block8_1_conv (Conv2D) (None, 5, 5, 2080) 933920 block8_1_mixed[0][0]
__________________________________________________________________________________________________
block8_1 (Lambda) (None, 5, 5, 2080) 0 mixed_7a[0][0]
block8_1_conv[0][0]
__________________________________________________________________________________________________
block8_1_ac (Activation) (None, 5, 5, 2080) 0 block8_1[0][0]
__________________________________________________________________________________________________
conv2d_168 (Conv2D) (None, 5, 5, 192) 399360 block8_1_ac[0][0]
__________________________________________________________________________________________________
batch_normalization_168 (BatchN (None, 5, 5, 192) 576 conv2d_168[0][0]
__________________________________________________________________________________________________
activation_168 (Activation) (None, 5, 5, 192) 0 batch_normalization_168[0][0]
__________________________________________________________________________________________________
conv2d_169 (Conv2D) (None, 5, 5, 224) 129024 activation_168[0][0]
__________________________________________________________________________________________________
batch_normalization_169 (BatchN (None, 5, 5, 224) 672 conv2d_169[0][0]
__________________________________________________________________________________________________
activation_169 (Activation) (None, 5, 5, 224) 0 batch_normalization_169[0][0]
__________________________________________________________________________________________________
conv2d_167 (Conv2D) (None, 5, 5, 192) 399360 block8_1_ac[0][0]
__________________________________________________________________________________________________
conv2d_170 (Conv2D) (None, 5, 5, 256) 172032 activation_169[0][0]
__________________________________________________________________________________________________
batch_normalization_167 (BatchN (None, 5, 5, 192) 576 conv2d_167[0][0]
__________________________________________________________________________________________________
batch_normalization_170 (BatchN (None, 5, 5, 256) 768 conv2d_170[0][0]
__________________________________________________________________________________________________
activation_167 (Activation) (None, 5, 5, 192) 0 batch_normalization_167[0][0]
__________________________________________________________________________________________________
activation_170 (Activation) (None, 5, 5, 256) 0 batch_normalization_170[0][0]
__________________________________________________________________________________________________
block8_2_mixed (Concatenate) (None, 5, 5, 448) 0 activation_167[0][0]
activation_170[0][0]
__________________________________________________________________________________________________
block8_2_conv (Conv2D) (None, 5, 5, 2080) 933920 block8_2_mixed[0][0]
__________________________________________________________________________________________________
block8_2 (Lambda) (None, 5, 5, 2080) 0 block8_1_ac[0][0]
block8_2_conv[0][0]
__________________________________________________________________________________________________
block8_2_ac (Activation) (None, 5, 5, 2080) 0 block8_2[0][0]
__________________________________________________________________________________________________
conv2d_172 (Conv2D) (None, 5, 5, 192) 399360 block8_2_ac[0][0]
__________________________________________________________________________________________________
batch_normalization_172 (BatchN (None, 5, 5, 192) 576 conv2d_172[0][0]
__________________________________________________________________________________________________
activation_172 (Activation) (None, 5, 5, 192) 0 batch_normalization_172[0][0]
__________________________________________________________________________________________________
conv2d_173 (Conv2D) (None, 5, 5, 224) 129024 activation_172[0][0]
__________________________________________________________________________________________________
batch_normalization_173 (BatchN (None, 5, 5, 224) 672 conv2d_173[0][0]
__________________________________________________________________________________________________
activation_173 (Activation) (None, 5, 5, 224) 0 batch_normalization_173[0][0]
__________________________________________________________________________________________________
conv2d_171 (Conv2D) (None, 5, 5, 192) 399360 block8_2_ac[0][0]
__________________________________________________________________________________________________
conv2d_174 (Conv2D) (None, 5, 5, 256) 172032 activation_173[0][0]
__________________________________________________________________________________________________
batch_normalization_171 (BatchN (None, 5, 5, 192) 576 conv2d_171[0][0]
__________________________________________________________________________________________________
batch_normalization_174 (BatchN (None, 5, 5, 256) 768 conv2d_174[0][0]
__________________________________________________________________________________________________
activation_171 (Activation) (None, 5, 5, 192) 0 batch_normalization_171[0][0]
__________________________________________________________________________________________________
activation_174 (Activation) (None, 5, 5, 256) 0 batch_normalization_174[0][0]
__________________________________________________________________________________________________
block8_3_mixed (Concatenate) (None, 5, 5, 448) 0 activation_171[0][0]
activation_174[0][0]
__________________________________________________________________________________________________
block8_3_conv (Conv2D) (None, 5, 5, 2080) 933920 block8_3_mixed[0][0]
__________________________________________________________________________________________________
block8_3 (Lambda) (None, 5, 5, 2080) 0 block8_2_ac[0][0]
block8_3_conv[0][0]
__________________________________________________________________________________________________
block8_3_ac (Activation) (None, 5, 5, 2080) 0 block8_3[0][0]
__________________________________________________________________________________________________
conv2d_176 (Conv2D) (None, 5, 5, 192) 399360 block8_3_ac[0][0]
__________________________________________________________________________________________________
batch_normalization_176 (BatchN (None, 5, 5, 192) 576 conv2d_176[0][0]
__________________________________________________________________________________________________
activation_176 (Activation) (None, 5, 5, 192) 0 batch_normalization_176[0][0]
__________________________________________________________________________________________________
conv2d_177 (Conv2D) (None, 5, 5, 224) 129024 activation_176[0][0]
__________________________________________________________________________________________________
batch_normalization_177 (BatchN (None, 5, 5, 224) 672 conv2d_177[0][0]
__________________________________________________________________________________________________
activation_177 (Activation) (None, 5, 5, 224) 0 batch_normalization_177[0][0]
__________________________________________________________________________________________________
conv2d_175 (Conv2D) (None, 5, 5, 192) 399360 block8_3_ac[0][0]
__________________________________________________________________________________________________
conv2d_178 (Conv2D) (None, 5, 5, 256) 172032 activation_177[0][0]
__________________________________________________________________________________________________
batch_normalization_175 (BatchN (None, 5, 5, 192) 576 conv2d_175[0][0]
__________________________________________________________________________________________________
batch_normalization_178 (BatchN (None, 5, 5, 256) 768 conv2d_178[0][0]
__________________________________________________________________________________________________
activation_175 (Activation) (None, 5, 5, 192) 0 batch_normalization_175[0][0]
__________________________________________________________________________________________________
activation_178 (Activation) (None, 5, 5, 256) 0 batch_normalization_178[0][0]
__________________________________________________________________________________________________
block8_4_mixed (Concatenate) (None, 5, 5, 448) 0 activation_175[0][0]
activation_178[0][0]
__________________________________________________________________________________________________
block8_4_conv (Conv2D) (None, 5, 5, 2080) 933920 block8_4_mixed[0][0]
__________________________________________________________________________________________________
block8_4 (Lambda) (None, 5, 5, 2080) 0 block8_3_ac[0][0]
block8_4_conv[0][0]
__________________________________________________________________________________________________
block8_4_ac (Activation) (None, 5, 5, 2080) 0 block8_4[0][0]
__________________________________________________________________________________________________
conv2d_180 (Conv2D) (None, 5, 5, 192) 399360 block8_4_ac[0][0]
__________________________________________________________________________________________________
batch_normalization_180 (BatchN (None, 5, 5, 192) 576 conv2d_180[0][0]
__________________________________________________________________________________________________
activation_180 (Activation) (None, 5, 5, 192) 0 batch_normalization_180[0][0]
__________________________________________________________________________________________________
conv2d_181 (Conv2D) (None, 5, 5, 224) 129024 activation_180[0][0]
__________________________________________________________________________________________________
batch_normalization_181 (BatchN (None, 5, 5, 224) 672 conv2d_181[0][0]
__________________________________________________________________________________________________
activation_181 (Activation) (None, 5, 5, 224) 0 batch_normalization_181[0][0]
__________________________________________________________________________________________________
conv2d_179 (Conv2D) (None, 5, 5, 192) 399360 block8_4_ac[0][0]
__________________________________________________________________________________________________
conv2d_182 (Conv2D) (None, 5, 5, 256) 172032 activation_181[0][0]
__________________________________________________________________________________________________
batch_normalization_179 (BatchN (None, 5, 5, 192) 576 conv2d_179[0][0]
__________________________________________________________________________________________________
batch_normalization_182 (BatchN (None, 5, 5, 256) 768 conv2d_182[0][0]
__________________________________________________________________________________________________
activation_179 (Activation) (None, 5, 5, 192) 0 batch_normalization_179[0][0]
__________________________________________________________________________________________________
activation_182 (Activation) (None, 5, 5, 256) 0 batch_normalization_182[0][0]
__________________________________________________________________________________________________
block8_5_mixed (Concatenate) (None, 5, 5, 448) 0 activation_179[0][0]
activation_182[0][0]
__________________________________________________________________________________________________
block8_5_conv (Conv2D) (None, 5, 5, 2080) 933920 block8_5_mixed[0][0]
__________________________________________________________________________________________________
block8_5 (Lambda) (None, 5, 5, 2080) 0 block8_4_ac[0][0]
block8_5_conv[0][0]
__________________________________________________________________________________________________
block8_5_ac (Activation) (None, 5, 5, 2080) 0 block8_5[0][0]
__________________________________________________________________________________________________
conv2d_184 (Conv2D) (None, 5, 5, 192) 399360 block8_5_ac[0][0]
__________________________________________________________________________________________________
batch_normalization_184 (BatchN (None, 5, 5, 192) 576 conv2d_184[0][0]
__________________________________________________________________________________________________
activation_184 (Activation) (None, 5, 5, 192) 0 batch_normalization_184[0][0]
__________________________________________________________________________________________________
conv2d_185 (Conv2D) (None, 5, 5, 224) 129024 activation_184[0][0]
__________________________________________________________________________________________________
batch_normalization_185 (BatchN (None, 5, 5, 224) 672 conv2d_185[0][0]
__________________________________________________________________________________________________
activation_185 (Activation) (None, 5, 5, 224) 0 batch_normalization_185[0][0]
__________________________________________________________________________________________________
conv2d_183 (Conv2D) (None, 5, 5, 192) 399360 block8_5_ac[0][0]
__________________________________________________________________________________________________
conv2d_186 (Conv2D) (None, 5, 5, 256) 172032 activation_185[0][0]
__________________________________________________________________________________________________
batch_normalization_183 (BatchN (None, 5, 5, 192) 576 conv2d_183[0][0]
__________________________________________________________________________________________________
batch_normalization_186 (BatchN (None, 5, 5, 256) 768 conv2d_186[0][0]
__________________________________________________________________________________________________
activation_183 (Activation) (None, 5, 5, 192) 0 batch_normalization_183[0][0]
__________________________________________________________________________________________________
activation_186 (Activation) (None, 5, 5, 256) 0 batch_normalization_186[0][0]
__________________________________________________________________________________________________
block8_6_mixed (Concatenate) (None, 5, 5, 448) 0 activation_183[0][0]
activation_186[0][0]
__________________________________________________________________________________________________
block8_6_conv (Conv2D) (None, 5, 5, 2080) 933920 block8_6_mixed[0][0]
__________________________________________________________________________________________________
block8_6 (Lambda) (None, 5, 5, 2080) 0 block8_5_ac[0][0]
block8_6_conv[0][0]
__________________________________________________________________________________________________
block8_6_ac (Activation) (None, 5, 5, 2080) 0 block8_6[0][0]
__________________________________________________________________________________________________
conv2d_188 (Conv2D) (None, 5, 5, 192) 399360 block8_6_ac[0][0]
__________________________________________________________________________________________________
batch_normalization_188 (BatchN (None, 5, 5, 192) 576 conv2d_188[0][0]
__________________________________________________________________________________________________
activation_188 (Activation) (None, 5, 5, 192) 0 batch_normalization_188[0][0]
__________________________________________________________________________________________________
conv2d_189 (Conv2D) (None, 5, 5, 224) 129024 activation_188[0][0]
__________________________________________________________________________________________________
batch_normalization_189 (BatchN (None, 5, 5, 224) 672 conv2d_189[0][0]
__________________________________________________________________________________________________
activation_189 (Activation) (None, 5, 5, 224) 0 batch_normalization_189[0][0]
__________________________________________________________________________________________________
conv2d_187 (Conv2D) (None, 5, 5, 192) 399360 block8_6_ac[0][0]
__________________________________________________________________________________________________
conv2d_190 (Conv2D) (None, 5, 5, 256) 172032 activation_189[0][0]
__________________________________________________________________________________________________
batch_normalization_187 (BatchN (None, 5, 5, 192) 576 conv2d_187[0][0]
__________________________________________________________________________________________________
batch_normalization_190 (BatchN (None, 5, 5, 256) 768 conv2d_190[0][0]
__________________________________________________________________________________________________
activation_187 (Activation) (None, 5, 5, 192) 0 batch_normalization_187[0][0]
__________________________________________________________________________________________________
activation_190 (Activation) (None, 5, 5, 256) 0 batch_normalization_190[0][0]
__________________________________________________________________________________________________
block8_7_mixed (Concatenate) (None, 5, 5, 448) 0 activation_187[0][0]
activation_190[0][0]
__________________________________________________________________________________________________
block8_7_conv (Conv2D) (None, 5, 5, 2080) 933920 block8_7_mixed[0][0]
__________________________________________________________________________________________________
block8_7 (Lambda) (None, 5, 5, 2080) 0 block8_6_ac[0][0]
block8_7_conv[0][0]
__________________________________________________________________________________________________
block8_7_ac (Activation) (None, 5, 5, 2080) 0 block8_7[0][0]
__________________________________________________________________________________________________
conv2d_192 (Conv2D) (None, 5, 5, 192) 399360 block8_7_ac[0][0]
__________________________________________________________________________________________________
batch_normalization_192 (BatchN (None, 5, 5, 192) 576 conv2d_192[0][0]
__________________________________________________________________________________________________
activation_192 (Activation) (None, 5, 5, 192) 0 batch_normalization_192[0][0]
__________________________________________________________________________________________________
conv2d_193 (Conv2D) (None, 5, 5, 224) 129024 activation_192[0][0]
__________________________________________________________________________________________________
batch_normalization_193 (BatchN (None, 5, 5, 224) 672 conv2d_193[0][0]
__________________________________________________________________________________________________
activation_193 (Activation) (None, 5, 5, 224) 0 batch_normalization_193[0][0]
__________________________________________________________________________________________________
conv2d_191 (Conv2D) (None, 5, 5, 192) 399360 block8_7_ac[0][0]
__________________________________________________________________________________________________
conv2d_194 (Conv2D) (None, 5, 5, 256) 172032 activation_193[0][0]
__________________________________________________________________________________________________
batch_normalization_191 (BatchN (None, 5, 5, 192) 576 conv2d_191[0][0]
__________________________________________________________________________________________________
batch_normalization_194 (BatchN (None, 5, 5, 256) 768 conv2d_194[0][0]
__________________________________________________________________________________________________
activation_191 (Activation) (None, 5, 5, 192) 0 batch_normalization_191[0][0]
__________________________________________________________________________________________________
activation_194 (Activation) (None, 5, 5, 256) 0 batch_normalization_194[0][0]
__________________________________________________________________________________________________
block8_8_mixed (Concatenate) (None, 5, 5, 448) 0 activation_191[0][0]
activation_194[0][0]
__________________________________________________________________________________________________
block8_8_conv (Conv2D) (None, 5, 5, 2080) 933920 block8_8_mixed[0][0]
__________________________________________________________________________________________________
block8_8 (Lambda) (None, 5, 5, 2080) 0 block8_7_ac[0][0]
block8_8_conv[0][0]
__________________________________________________________________________________________________
block8_8_ac (Activation) (None, 5, 5, 2080) 0 block8_8[0][0]
__________________________________________________________________________________________________
conv2d_196 (Conv2D) (None, 5, 5, 192) 399360 block8_8_ac[0][0]
__________________________________________________________________________________________________
batch_normalization_196 (BatchN (None, 5, 5, 192) 576 conv2d_196[0][0]
__________________________________________________________________________________________________
activation_196 (Activation) (None, 5, 5, 192) 0 batch_normalization_196[0][0]
__________________________________________________________________________________________________
conv2d_197 (Conv2D) (None, 5, 5, 224) 129024 activation_196[0][0]
__________________________________________________________________________________________________
batch_normalization_197 (BatchN (None, 5, 5, 224) 672 conv2d_197[0][0]
__________________________________________________________________________________________________
activation_197 (Activation) (None, 5, 5, 224) 0 batch_normalization_197[0][0]
__________________________________________________________________________________________________
conv2d_195 (Conv2D) (None, 5, 5, 192) 399360 block8_8_ac[0][0]
__________________________________________________________________________________________________
conv2d_198 (Conv2D) (None, 5, 5, 256) 172032 activation_197[0][0]
__________________________________________________________________________________________________
batch_normalization_195 (BatchN (None, 5, 5, 192) 576 conv2d_195[0][0]
__________________________________________________________________________________________________
batch_normalization_198 (BatchN (None, 5, 5, 256) 768 conv2d_198[0][0]
__________________________________________________________________________________________________
activation_195 (Activation) (None, 5, 5, 192) 0 batch_normalization_195[0][0]
__________________________________________________________________________________________________
activation_198 (Activation) (None, 5, 5, 256) 0 batch_normalization_198[0][0]
__________________________________________________________________________________________________
block8_9_mixed (Concatenate) (None, 5, 5, 448) 0 activation_195[0][0]
activation_198[0][0]
__________________________________________________________________________________________________
block8_9_conv (Conv2D) (None, 5, 5, 2080) 933920 block8_9_mixed[0][0]
__________________________________________________________________________________________________
block8_9 (Lambda) (None, 5, 5, 2080) 0 block8_8_ac[0][0]
block8_9_conv[0][0]
__________________________________________________________________________________________________
block8_9_ac (Activation) (None, 5, 5, 2080) 0 block8_9[0][0]
__________________________________________________________________________________________________
conv2d_200 (Conv2D) (None, 5, 5, 192) 399360 block8_9_ac[0][0]
__________________________________________________________________________________________________
batch_normalization_200 (BatchN (None, 5, 5, 192) 576 conv2d_200[0][0]
__________________________________________________________________________________________________
activation_200 (Activation) (None, 5, 5, 192) 0 batch_normalization_200[0][0]
__________________________________________________________________________________________________
conv2d_201 (Conv2D) (None, 5, 5, 224) 129024 activation_200[0][0]
__________________________________________________________________________________________________
batch_normalization_201 (BatchN (None, 5, 5, 224) 672 conv2d_201[0][0]
__________________________________________________________________________________________________
activation_201 (Activation) (None, 5, 5, 224) 0 batch_normalization_201[0][0]
__________________________________________________________________________________________________
conv2d_199 (Conv2D) (None, 5, 5, 192) 399360 block8_9_ac[0][0]
__________________________________________________________________________________________________
conv2d_202 (Conv2D) (None, 5, 5, 256) 172032 activation_201[0][0]
__________________________________________________________________________________________________
batch_normalization_199 (BatchN (None, 5, 5, 192) 576 conv2d_199[0][0]
__________________________________________________________________________________________________
batch_normalization_202 (BatchN (None, 5, 5, 256) 768 conv2d_202[0][0]
__________________________________________________________________________________________________
activation_199 (Activation) (None, 5, 5, 192) 0 batch_normalization_199[0][0]
__________________________________________________________________________________________________
activation_202 (Activation) (None, 5, 5, 256) 0 batch_normalization_202[0][0]
__________________________________________________________________________________________________
block8_10_mixed (Concatenate) (None, 5, 5, 448) 0 activation_199[0][0]
activation_202[0][0]
__________________________________________________________________________________________________
block8_10_conv (Conv2D) (None, 5, 5, 2080) 933920 block8_10_mixed[0][0]
__________________________________________________________________________________________________
block8_10 (Lambda) (None, 5, 5, 2080) 0 block8_9_ac[0][0]
block8_10_conv[0][0]
__________________________________________________________________________________________________
conv_7b (Conv2D) (None, 5, 5, 1536) 3194880 block8_10[0][0]
__________________________________________________________________________________________________
conv_7b_bn (BatchNormalization) (None, 5, 5, 1536) 4608 conv_7b[0][0]
__________________________________________________________________________________________________
conv_7b_ac (Activation) (None, 5, 5, 1536) 0 conv_7b_bn[0][0]
__________________________________________________________________________________________________
global_average_pooling2d (Globa (None, 1536) 0 conv_7b_ac[0][0]
__________________________________________________________________________________________________
DR0 (Dropout) (None, 1536) 0 global_average_pooling2d[0][0]
__________________________________________________________________________________________________
dense_3 (Dense) (None, 1024) 1573888 DR0[0][0]
__________________________________________________________________________________________________
DR4 (Dropout) (None, 1024) 0 dense_3[0][0]
__________________________________________________________________________________________________
dense (Dense) (None, 1024) 1573888 DR0[0][0]
__________________________________________________________________________________________________
dense_4 (Dense) (None, 1024) 1049600 DR4[0][0]
__________________________________________________________________________________________________
DR1 (Dropout) (None, 1024) 0 dense[0][0]
__________________________________________________________________________________________________
DR5 (Dropout) (None, 1024) 0 dense_4[0][0]
__________________________________________________________________________________________________
dense_1 (Dense) (None, 512) 524800 DR1[0][0]
__________________________________________________________________________________________________
dense_5 (Dense) (None, 512) 524800 DR5[0][0]
__________________________________________________________________________________________________
DR2 (Dropout) (None, 512) 0 dense_1[0][0]
__________________________________________________________________________________________________
DR6 (Dropout) (None, 512) 0 dense_5[0][0]
__________________________________________________________________________________________________
dense_2 (Dense) (None, 512) 262656 DR2[0][0]
__________________________________________________________________________________________________
dense_6 (Dense) (None, 512) 262656 DR6[0][0]
__________________________________________________________________________________________________
DR3 (Dropout) (None, 512) 0 dense_2[0][0]
__________________________________________________________________________________________________
DR7 (Dropout) (None, 512) 0 dense_6[0][0]
__________________________________________________________________________________________________
class_op (Dense) (None, 196) 100548 DR3[0][0]
__________________________________________________________________________________________________
reg_op (Dense) (None, 4) 2052 DR7[0][0]
==================================================================================================
Total params: 60,211,624
Trainable params: 60,151,080
Non-trainable params: 60,544
__________________________________________________________________________________________________
def iou(box1, box2):
    """Compute the Intersection over Union (IoU) between two axis-aligned boxes.

    Arguments:
        box1 -- first box, sequence with coordinates (box1_x1, box1_y1, box1_x2, box1_y2)
        box2 -- second box, sequence with coordinates (box2_x1, box2_y1, box2_x2, box2_y2)

    Returns:
        IoU value in [0, 1]. Returns 0.0 when the union area is zero
        (both boxes degenerate), instead of raising ZeroDivisionError.
    """
    # Extract x, y co-ordinates of both boxes
    (box1_x1, box1_y1, box1_x2, box1_y2) = box1
    (box2_x1, box2_y1, box2_x2, box2_y2) = box2
    # Intersection rectangle (xi1, yi1, xi2, yi2); width/height are clamped
    # at 0 so non-overlapping boxes yield zero intersection area.
    xi1 = max(box1_x1, box2_x1)
    yi1 = max(box1_y1, box2_y1)
    xi2 = min(box1_x2, box2_x2)
    yi2 = min(box1_y2, box2_y2)
    inter_width = max((xi2 - xi1), 0)
    inter_height = max((yi2 - yi1), 0)
    inter_area = inter_width * inter_height
    # Union area via: Union(A, B) = A + B - Inter(A, B)
    box1_area = (box1_y2 - box1_y1) * (box1_x2 - box1_x1)
    box2_area = (box2_y2 - box2_y1) * (box2_x2 - box2_x1)
    union_area = box1_area + box2_area - inter_area
    # Guard: both boxes have zero area -> define IoU as 0 instead of dividing by 0
    if union_area == 0:
        return 0.0
    return inter_area / union_area
def calc_iou(y_true, y_pred):
    """Compute the mean IoU between batches of ground-truth and predicted boxes.

    Arguments:
        y_true -- eager tensor; each row holds the 4 coordinates of a ground-truth
                  box (layout per box assumed (x1, y1, x2, y2), matching iou())
        y_pred -- eager tensor of the same shape with predicted boxes

    Returns:
        Mean IoU over the batch as a np.float32 scalar (the float32 cast keeps
        the value compatible with the tf.py_function wrapper's declared dtype).
    """
    results = []
    for ind in range(y_true.shape[0]):
        # .numpy() converts the eager tensor row; .copy() detaches the buffer
        box1 = y_true[ind].numpy().copy()
        box2 = y_pred[ind].numpy().copy()
        results.append(iou(box1, box2))
    return np.mean(results).astype(np.float32)
def IoU(y_true, y_pred):
    """Keras-compatible IoU metric: defers to the eager calc_iou via tf.py_function."""
    metric_value = tf.py_function(calc_iou, [y_true, y_pred], tf.float32)
    return metric_value
def model_compile(model, compile_dict):
    """
    Function to compile the model.
    Arguments:
        model - Model instance that needs to be compiled
        compile_dict - Dictionary with the keys:
            'ilr'        - Initial learning rate for the LR decay scheduler
            'dr'         - Decay rate for the InverseTimeDecay scheduler
            'ds'         - Decay steps for the InverseTimeDecay scheduler
            'redlr_plat' - Boolean: if True, use a constant LR so the
                           ReduceLROnPlateau callback can manage the LR
                           during fit instead of a fixed schedule
            'loss_type'  - 'classification' or 'localization'
    Raises:
        ValueError - if 'loss_type' is not a supported value (previously the
                     model was silently left uncompiled in that case).
    """
    # Retrieve compile_dict parameters
    ilr = compile_dict['ilr']
    dr = compile_dict['dr']
    ds = compile_dict['ds']
    redlr_plat = compile_dict['redlr_plat']
    loss_type = compile_dict['loss_type']
    # Define Optimizer: a schedule-driven LR would conflict with
    # ReduceLROnPlateau, so use the fixed initial LR when that callback is on.
    lr_sch = InverseTimeDecay(ilr, ds, dr)  # Inverse Time Decay LR scheduler
    if redlr_plat:
        opt = optimizers.Adam(learning_rate = ilr)
    else:
        opt = optimizers.Adam(learning_rate = lr_sch)
    # Define loss / metrics per task and compile
    if loss_type == 'classification':
        loss = losses.CategoricalCrossentropy()       # Loss function
        met = [metrics.CategoricalAccuracy()]         # Metric
        model.compile(optimizer = opt, loss = loss, metrics = met)
    elif loss_type == 'localization':
        # CategoricalCrossentropy for 'class_op', MeanSquaredError for 'reg_op'
        loss = {'class_op': losses.CategoricalCrossentropy(), 'reg_op': losses.MeanSquaredError()}
        # Loss weights: regression loss is scaled up by 1000 relative to
        # classification (author-chosen balance between the two heads)
        loss_weights_dict = {'class_op': 1, 'reg_op': 1000}
        met = {'class_op': [metrics.CategoricalAccuracy()], 'reg_op': [IoU]}  # Metrics per head
        model.compile(optimizer = opt, loss = loss, loss_weights = loss_weights_dict, metrics = met)
    else:
        raise ValueError("Unsupported loss_type: " + repr(loss_type) +
                         "; expected 'classification' or 'localization'")
def model_fit(model, train_dict):
    """
    Function to fit the model.
    Arguments:
        model - Compiled model instance that needs to be trained
        train_dict - Dictionary with list of keys / values needed to fit the model
                     (paths, callback switches and fit parameters; see inline notes)
    Returns:
        model - Final trained model
        hist - Model training history
    """
    # Retrieve path parameters
    tb_path = train_dict['tb_path']  # Path for Tensorboard logs (None disables the callback)
    mc_path = train_dict['mc_path']  # File name for model checkpoints (None disables the callback)
    # Retrieve callback parameters
    mcp_freq = train_dict['mcp_freq']      # Number of batches after which model will be checkpointed
    early_stop = train_dict['early_stop']  # Boolean: If True, implement early stop
    redlr_plat = train_dict['redlr_plat']  # Boolean: If True, implement reduce LR on plateau
    lrpl_fac = train_dict['lrpl_fac']      # Factor to use for Reduce LR on Plateau callback
    lrpl_pat = train_dict['lrpl_pat']      # Patience to use for Reduce LR on Plateau callback
    # Retrieve training parameters
    train_gen = train_dict['train_gen']    # Train Generator to use while fitting
    val_data = train_dict['val_gen']       # Validation Generator to use while fitting
    epochs = train_dict['epochs']          # Number of epochs to train for
    initial_epoch = train_dict['initial_epoch']  # Initial epoch to re-start training from
    train_steps_per_epoch = train_dict['train_steps_per_epoch']  # Number of steps per training epoch
    val_steps = train_dict['val_steps']    # Number of steps before stopping validation
    val_freq = train_dict['val_freq']      # Number of epochs to run before performing a validation run
    verb = train_dict['verb']              # Controls verbosity level of model fit
    #### Start - Define callbacks
    # Each callback is constructed only inside its own guard. In particular the
    # TensorBoard log dir was previously built unconditionally, so passing
    # tb_path = None crashed in os.path.join() before the guard was reached.
    callback_list = []
    if tb_path is not None:
        # Timestamped (IST) sub-directory so successive runs keep separate logs
        logdir = os.path.join(tb_path,
                    datetime.datetime.now(pytz.timezone('Asia/Kolkata')).strftime("%d%m_%H%M"))
        callback_list.append(TensorBoard(logdir, histogram_freq = 0))
    if mc_path is not None:
        # Checkpoint every mcp_freq batches, keeping only the best val_loss model
        callback_list.append(ModelCheckpoint(filepath = mc_path, monitor = "val_loss",
                                             save_freq = mcp_freq, verbose = 0,
                                             save_best_only = True))
    if early_stop:
        callback_list.append(EarlyStopping(monitor = "val_loss", min_delta = 0,
                                           patience = 35, mode = "min", verbose = 1))
    if redlr_plat:
        callback_list.append(ReduceLROnPlateau(monitor = "val_loss", factor = lrpl_fac,
                                               patience = lrpl_pat, verbose = 1,
                                               mode = "min", min_delta = 0.0001))
    #### End - Define callbacks
    #### Start - Model Fit
    hist = model.fit(x = train_gen, validation_data = val_data, epochs = epochs,
                     initial_epoch = initial_epoch, steps_per_epoch = train_steps_per_epoch,
                     validation_steps = val_steps, validation_freq = val_freq,
                     callbacks = callback_list, verbose = verb)
    #### End - Model Fit
    return model, hist
def model_train(model_dict, compile_dict, train_dict, model = None):
    """
    Function to instantiate (or load) model, compile and fit model.
    Arguments:
        model_dict - Dictionary with list of keys / values needed to build the model
        compile_dict - Dictionary with list of keys / values needed to compile the model
        train_dict - Dictionary with list of keys / values needed to train the model
        model - Pre-trained model (Pass this as input only if fit_resume = True and load_model = False)
    Returns:
        model - Final trained model
        hist - Model training history
    """
    # Retrieve train_dict parameters
    fit_resume = train_dict['fit_resume'] # Boolean: If True, resume fit from initial epoch
    load_model = train_dict['load_model'] # Boolean: If True, load model from 'fm_path' and resume fit
    recompile = train_dict['recompile']   # Boolean: If True, recompile model before resuming fit
    fm_path = train_dict['fm_path']       # File name to use for storing final trained model
    hi_path = train_dict['hi_path']       # File name to use for storing training history
    if not fit_resume:
        # Fresh run: build a new model and always compile it
        print("Instantiating new model...", end = ', ')
        model = model_core(model_dict)
        print("Compiling model...", end = ', ')
        model_compile(model, compile_dict)
        print("Model Fit started....", end = ', ')
    else:
        # Resuming: optionally reload from disk, optionally recompile.
        # (Previously the recompile + fit logic was duplicated in both the
        # load_model and in-memory branches; merged here.)
        if load_model:
            print("Loading model from disk...", end = ', ')
            model = models.load_model(fm_path)
        if recompile:
            print("Re-compiling model...", end = ', ')
            model_compile(model, compile_dict)
        print("Resuming model fit....", end = ', ')
    model, hist = model_fit(model, train_dict) # Fit (or resume fitting) the model
    # Save final trained model and history to file (skipped when path is None)
    if fm_path is not None: model.save(fm_path, overwrite = True, save_format = 'h5')
    if hi_path is not None: np.save(hi_path, hist.history)
    return model, hist
class MyHyperModel(HyperModel):
    """
    Keras-Tuner HyperModel that builds/compiles a model via model_core /
    model_compile, with the two dropout rates exposed as hyperparameters.
    """
    def __init__(self, model_dict, compile_dict, hp_dict):
        self.model_dict_tune = model_dict.copy()     # Dictionary of default model parameters
        self.compile_dict_tune = compile_dict.copy() # Dictionary of default compilation parameters
        self.hp_dict = hp_dict                       # Dictionary of hyperparameters to tune

    def build(self, hp):
        """Build and compile one trial model from the hyperparameter set `hp`."""
        hp_list = [] # List placeholder to define all hyperparameters
        # BUG FIX: iterate over self.hp_dict (the stored copy); previously this
        # read the module-level `hp_dict` global, ignoring the constructor arg.
        for ind in range(len(self.hp_dict)):
            # Each entry: [kind, name, values, ordered]; only 'choice' supported
            if self.hp_dict[ind][0] == 'choice':
                hp_list.append(hp.Choice(name = self.hp_dict[ind][1],
                                         values = self.hp_dict[ind][2],
                                         ordered = self.hp_dict[ind][3]))
        # Set hyperparameters in model_dict_tune (first two entries are the
        # two dropout rates being tuned)
        self.model_dict_tune['dropout_rate'] = [hp_list[0], hp_list[1]]
        model = model_core(self.model_dict_tune)     # Instantiate model
        model_compile(model, self.compile_dict_tune) # Compile model
        return model
### Define file paths
mod_file_pref = "MC_01" # Prefix to use for naming files and paths
tb_path = os.path.join(tb_logs_base_path, mod_file_pref) # Tensorboard base path
fm_path = os.path.join(out_base_path, mod_file_pref + "_finalmodel.h5") # Final trained model path
#mc_path = os.path.join(out_base_path, mod_file_pref + "_EP{epoch:04d}.h5") # Model checkpoints path
# Checkpoints go to fast local /content storage; copied to Drive after training
mc_path = os.path.join("/content", mod_file_pref + ".h5") # Model checkpoints path
hi_path = os.path.join(out_base_path, mod_file_pref + "_hist.npy") # Training history path
# Set model_dict values
model_dict = {'model_arch': 'C',
'mod_inp_shape': mod_inp_shape,
'weights': 'imagenet',
'trainable': True,
'dropout_rate': [0.4, 0.7, 0.0, 0.0, 0.2, 0.2, 0.1, 0.1],
'num_classes': num_classes
}
# Set compile_dict values
compile_dict = {'ilr': 5e-4, # Initial learning rate to use for learning rate decay scheduler
'dr': 1, # Decay rate to use for learning rate decay scheduler
'ds': (len(train_generator) * 10), # Decay steps to use for learning rate decay scheduler
'redlr_plat': True, # Boolean: If True, implement reduce LR on plateau
'loss_type': 'localization' # Set to 'classification' or 'localization'
}
# Set train_dict values
train_dict = {'fit_resume': False, # Boolean: If True, resume fit from initial epoch
'load_model': False, # Boolean: If True, load model from 'fm_path' and resume fit
'recompile': False, # Boolean: If True, recompile model before resuming fit
'train_gen': train_generator, # Train generator to use while fitting
'val_gen': val_generator, # Validation generator to use while fitting
'epochs': 250, # Number of epochs to train for
'initial_epoch': 0, # Initial epoch to start from
'train_steps_per_epoch': len(train_generator), # No. of steps per epoch
'val_steps': len(val_generator), # No. of steps before stopping eval of val set
'val_freq': 1, # Number of epochs to run before performing a validation run
'verb': 1, # Controls verbosity level of model fit.
'mcp_freq': "epoch", # Checkpoint model after mcp_freq batches
'early_stop': True, # Boolean: If True, implement early stop
'redlr_plat': compile_dict['redlr_plat'], # Boolean: If True, implement reduce LR on plateau
'lrpl_fac': 0.2, # Factor to use for Reduce LR on Plateau callback
'lrpl_pat': 10, # Patience to use for Reduce LR on Plateau callback
'tb_path': tb_path, # Path to store Tensorboard callback information
'mc_path': mc_path, # File name to use for storing model checkpoints
'fm_path': fm_path, # File name to use for storing final trained model
'hi_path': hi_path, # File name to use for storing training history
}
# Display the resolved paths for a quick sanity check
print(tb_path)
print(mc_path)
print(fm_path)
print(hi_path)
/content/drive/MyDrive/AI_ML_Folder/Colab_Directory/Model_Outputs/Stanford_Car_Dataset/TB_Logs/MC_01 /content/MC_01.h5 /content/drive/MyDrive/AI_ML_Folder/Colab_Directory/Model_Outputs/Stanford_Car_Dataset/Training_Outputs/MC_01_finalmodel.h5 /content/drive/MyDrive/AI_ML_Folder/Colab_Directory/Model_Outputs/Stanford_Car_Dataset/Training_Outputs/MC_01_hist.npy
# %tensorboard --logdir {tb_logs_base_path}
This section is commented out as the models have already been trained and stored to disk.
# # Get start time of run and display it
# start_time = datetime.datetime.now(pytz.timezone('Asia/Kolkata'))
# print("Started at %s" %(start_time.strftime("%H:%M:%S")), end = '; ')
# # Instantiate, compile and fit model
# if (train_dict['fit_resume'] and not(train_dict['load_model'])):
# model, hist = model_train(model_dict, compile_dict, train_dict, model)
# else:
# model, hist = model_train(model_dict, compile_dict, train_dict)
# # Get end time of run and display elapsed time
# end_time = datetime.datetime.now(pytz.timezone('Asia/Kolkata'))
# elap_time = ((end_time - start_time).total_seconds())/60
# print("\nCompleted at %s. Elapsed time = %0.2f minutes." %(end_time.strftime("%H:%M:%S"), elap_time))
# # Copy best model to drive
# model_path = os.path.join(out_base_path, mod_file_pref + "_bestmodel.h5")
# !cp /content/MC_01.h5 {model_path}
# !rm /content/MC_01.h5
# from lr_finder import LRFinder
# opt = optimizers.Adam()
# loss = losses.CategoricalCrossentropy() # Define loss
# met = [metrics.CategoricalAccuracy()] # Define metric
# model.compile(optimizer = opt, loss = loss, metrics = met)
# # model is a Keras model
# lr_finder = LRFinder(model)
# lr_finder.find_generator(train_generator, start_lr = 1e-5, end_lr = 0.1, epochs = 5,\
# steps_per_epoch=len(train_generator))
# lr_finder.plot_loss(n_skip_beginning=0, n_skip_end=5)
# project_name = "MA_DR_Tune" # Directory to store results of hyperparameter tuning
# # Define hyperparameters in hp_dict
# hp_dict = {0: ['choice', 'Dropout_0', [0.2, 0.3, 0.4, 0.5], True],
# 1: ['choice', 'Dropout_1', [0.2, 0.3, 0.4, 0.5], True],
# }
# # Instantiate hypermodel
# hypermodel = MyHyperModel(model_dict, compile_dict, hp_dict)
# # Instantiate Tuner
# tuner = kt.RandomSearch(hypermodel, objective = 'val_categorical_accuracy', max_trials = 2,\
# seed = 1234, directory = kt_logs_base_path, project_name = project_name)
# # Display Hyperparameter Search Space
# tuner.search_space_summary()
# # Run hyperparameter search
# tuner.search(x = train_dict['train_gen'], validation_data = train_dict['val_gen'],\
# epochs = 2, steps_per_epoch = train_dict['train_steps_per_epoch'],\
# validation_steps = train_dict['val_steps'], verbose = 1)
# best_hps = tuner.get_best_hyperparameters(num_trials = 1)[0].get_config()['values']
# print()
# print("Best Hyperparameters:")
# print("---------------------")
# print(best_hps)
# # best_hps_dict = tuner.get_best_hyperparameters(num_trials = 1)[0]
# # best_model = tuner.hypermodel.build(best_hps_dict)
# # best_model.summary()
def get_model(mod_file_prefix, best_model, custom_objects, print_summary = False):
    '''
    Function to return required paths, loaded model and print model summary.
    Arguments:
        mod_file_prefix: Prefix of model file name
        best_model: Boolean. If True, load best model, else load final model
        custom_objects: List [flag, dict] - flag indicates whether custom objects
                        need to be defined while loading the model; dict maps
                        the custom object names to their implementations
        print_summary: Boolean. If True, print model summary
    Returns:
        fm_path: Path of the final-model file (returned regardless of which
                 model was actually loaded)
        hi_path: Path of the training-history file
        model: The loaded Keras model
    '''
    # BUG FIX: use the function argument (mod_file_prefix); the body previously
    # referenced the module-level global `mod_file_pref`, so the argument was
    # silently ignored.
    fm_path = os.path.join(out_base_path, mod_file_prefix + "_finalmodel.h5") # Final model path
    bm_path = os.path.join(out_base_path, mod_file_prefix + "_bestmodel.h5")  # Best model path
    hi_path = os.path.join(out_base_path, mod_file_prefix + "_hist.npy")      # History file path
    # Load model from disk; custom_objects = None is equivalent to not passing it
    cust = custom_objects[1] if custom_objects[0] else None
    model_path = bm_path if best_model else fm_path
    model = models.load_model(model_path, custom_objects = cust)
    # Print model summary
    if print_summary:
        display(model.summary())
    return fm_path, hi_path, model
def plot_lc(mod_file_pref, hi_path, hist_plot_dict, num_cols, col_size, row_size):
    '''
    Function to plot learning curves from a saved Keras history dictionary.
    Arguments:
        mod_file_pref: Model file prefix (used only in the figure title)
        hi_path: path of history file (.npy file holding a history dict)
        hist_plot_dict: Dictionary containing items to plot, shaped as
                        {plot_index: [{hist_key: [line_style, label], ...},
                                      title, ylabel, xlabel]}
        num_cols: Number of columns to use for plotting
        col_size: Column width to use while plotting
        row_size: Row width to use while plotting
    '''
    hist_model = np.load(hi_path, allow_pickle = 'TRUE').item() # dict of metric -> per-epoch values
    num_plots = len(hist_plot_dict)
    num_rows = math.ceil(num_plots / num_cols) # Number of rows to use for plotting
    fig = plt.figure(figsize = ((num_cols * col_size), (num_rows * row_size)))
    fig.suptitle(mod_file_pref + " Learning Curves", fontsize = 20)
    for ind, value in enumerate(hist_plot_dict.items()):
        ax = plt.subplot(num_rows, num_cols, (ind + 1))
        # value[1] = [curves_dict, title, ylabel, xlabel]
        for key in value[1][0].keys():
            ax.plot(hist_model[key], value[1][0][key][0], label = value[1][0][key][1])
        ax.set_title(value[1][1], fontsize = 20)
        ax.set_ylabel(value[1][2], fontsize = 20)
        ax.set_xlabel(value[1][3], fontsize = 20)
        # BUG FIX: the 'b' keyword was deprecated in Matplotlib 3.5 and removed
        # in 3.6; pass the visibility flag positionally instead.
        ax.grid(True)
        ax.legend(fontsize = 15)
def plot_cm(train_gen, val_gen, test_gen, y_train, y_val, y_test, row_size, col_size):
    '''
    Function to plot confusion matrices for train, val and test sets.
    Uses the module-level `model` for predictions.
    Arguments:
        train_gen: Train generator without augmentation and shuffling
        val_gen: Validation generator without augmentation and shuffling
        test_gen: Test generator without augmentation and shuffling
        y_train: Ground-truth training set labels
        y_val: Ground-truth validation set labels
        y_test: Ground-truth testing set labels
        row_size: Row size to use for plotting
        col_size: Column size to use for plotting
    '''
    def _cm_df(gen, y_true, index, columns):
        # Predict labels for one set and return its confusion matrix as a DataFrame
        y_pred = np.argmax(model.predict(x = gen, steps = len(gen), verbose = 0), axis = 1)
        cm = tf.math.confusion_matrix(y_true, y_pred).numpy()
        return pd.DataFrame(cm, index = index, columns = columns)

    # Row/column labels: 'A<k>' = actual class k, 'P<k>' = predicted class k.
    # BUG FIX: np.int was deprecated (NumPy 1.20) and removed (NumPy 1.24);
    # use the builtin int instead.
    classes = list(np.unique(y_train).astype(int))
    index = [('A' + str(a)) for a in classes]
    columns = [('P' + str(a)) for a in classes]
    # Plot the three confusion matrices stacked vertically
    fig = plt.figure(figsize = (col_size, (3 * row_size)))
    data_sets = [(train_gen, y_train, "Training"),
                 (val_gen, y_val, "Validation"),
                 (test_gen, y_test, "Test")]
    for pos, (gen, y_true, set_name) in enumerate(data_sets):
        cm_df = _cm_df(gen, y_true, index, columns)
        ax = plt.subplot(3, 1, (pos + 1))
        sns.heatmap(cm_df, annot_kws = {"fontsize": 15}, linewidths = 1,
                    linecolor = 'black', cmap = 'Blues', annot = True, fmt = 'g',
                    cbar = False, ax = ax)
        ax.set_title("Confusion Matrix for %s set" % set_name, fontsize = 25)
        ax.tick_params(labelsize = 20)
    plt.show()
def predict_and_plot_df(df, x_col, y_col, img_root_path, img_cons, samp_indices, plot_bbox, model,
                        mod_inp_shape, ind_class_dict, num_cols, col_size, row_size, fig_title):
    '''
    Function to make predictions on a subset of data and plot images with actual
    and predicted labels (and, optionally, ground-truth vs. predicted BBOXes).
    Arguments:
        df: Name of dataframe to read image details from
        x_col: Name of column in dataframe that contains file names
        y_col: Name of column in dataframe that contains the class names
        img_root_path: Root directory path where images are stored
        img_cons: Boolean: If True, then all images are assumed to be consolidated
                  in img_root_path (no sub-directories). If False, then the images
                  are assumed to be present inside sub-directories of img_root_path
                  named with the class name.
        samp_indices: Iterable of dataframe row indices to predict and plot
        plot_bbox: Boolean: If True, plot bounding-boxes on top of image.
                   Should be set to True only if the dataframe has BBOX co-ords
                   and the model has a second (regression) output head.
        model: Trained model using which predictions will be made
        mod_inp_shape: Shape required for input image to model
        ind_class_dict: Dict mapping class index -> class name (None to display
                        the raw predicted index)
        num_cols: Number of columns to use for plotting
        col_size: Size of columns to use for plotting
        row_size: Size of rows to use for plotting
        fig_title: Title to use for overall figure
    '''
    num_rows = math.ceil(len(samp_indices) / num_cols) # Number of rows to use for plotting
    fig = plt.figure(figsize = ((num_cols * col_size), (num_rows * row_size)))
    fig.suptitle(fig_title, fontsize = 40)
    for ind, samp_ind in enumerate(samp_indices):
        img_file_name = df.iloc[samp_ind][x_col] # Extract file name of image
        img_class = df.iloc[samp_ind][y_col]     # Extract class of image
        if img_cons: # img_cons = True -> All images consolidated in img_root_path
            img_file_path = os.path.join(img_root_path, img_file_name)
        else: # img_cons = False -> Each image is in a class-named sub-directory
            # BUG FIX: was `os.path.join(img_root,path, img_class, img_file_path)`
            # (comma typo in img_root_path, and img_file_path used before
            # assignment) - a guaranteed NameError on this branch.
            img_file_path = os.path.join(img_root_path, img_class, img_file_name)
        img = cv2.imread(img_file_path, cv2.IMREAD_COLOR) # Load image using cv2 (BGR)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)        # Convert image to RGB format
        # Resize + preprocess a copy for the model; keep `img` for display
        img_for_model = cv2.resize(img, (mod_inp_shape[0], mod_inp_shape[1]),
                                   interpolation = cv2.INTER_CUBIC)
        img_for_model = preprocess_data(img_for_model)
        img_for_model = np.expand_dims(img_for_model, axis = 0) # Add batch dimension
        pred = model.predict(img_for_model)
        # BUG FIX: create the subplot unconditionally; `ax` was previously
        # defined only inside the plot_bbox branch, so plot_bbox = False
        # raised a NameError at the ax.text(...) calls below.
        ax = plt.subplot(num_rows, num_cols, (ind + 1))
        if plot_bbox: # plot_bbox = True => Plot Bounding-Box on top of image
            # Extract image dimensions
            img_h, img_w = df.iloc[samp_ind]['img_h'], df.iloc[samp_ind]['img_w']
            # Get ground-truth BBOX co-ords
            xmin, ymin, xmax, ymax = df.iloc[samp_ind]['xmin'], df.iloc[samp_ind]['ymin'],\
                                     df.iloc[samp_ind]['xmax'], df.iloc[samp_ind]['ymax']
            # Obtain predicted normalized BBOX coords (regression head output)
            (xmin_pred_norm, ymin_pred_norm, xmax_pred_norm, ymax_pred_norm) = pred[1][0]
            # Re-scale predicted BBOX coords to image dimensions
            xmin_pred = int(xmin_pred_norm * img_w)
            xmax_pred = int(xmax_pred_norm * img_w)
            ymin_pred = int(ymin_pred_norm * img_h)
            ymax_pred = int(ymax_pred_norm * img_h)
            # Set box_thickness based on image area
            box_thickness = int(np.ceil(((img_h) * (img_w)) / (100000)))
            # Draw ground-truth BBOX in green on top of image
            cv2.rectangle(img, (xmin, ymin), (xmax, ymax), (0, 255, 0), box_thickness)
            # Draw predicted BBOX in blue
            cv2.rectangle(img, (xmin_pred, ymin_pred), (xmax_pred, ymax_pred), (0, 0, 255), box_thickness)
            ax.text(0.5, 1.19, f"Green: GT BBOX, Blue: Predicted BBOX", transform = ax.transAxes,
                    horizontalalignment = 'center', verticalalignment = 'center',
                    color = 'r', fontfamily = 'sans-serif', fontsize = '15')
            pred = pred[0] # Keep only the classification head output
        pred_prob = 100 * np.max(pred) # Confidence of the predicted class, in %
        if ind_class_dict is None:
            class_pred = np.argmax(pred, axis = 1)[0]
        else:
            class_pred = ind_class_dict[np.argmax(pred, axis = 1)[0]]
        # Annotate and plot the image
        ax.text(0.5, 1.12, f"Actual label: {img_class}", transform = ax.transAxes,
                horizontalalignment = 'center', verticalalignment = 'center',
                color = 'b', fontfamily = 'sans-serif', fontsize = '15')
        ax.text(0.5, 1.05, f"Pred. label: {class_pred} ({pred_prob:0.1f}%)", transform = ax.transAxes,
                horizontalalignment = 'center', verticalalignment = 'center',
                color = 'b', fontfamily = 'sans-serif', fontsize = '15')
        ax.imshow(img.squeeze())
    plt.show()
Model Details:
Model input image shape set to (224, 224, 3)
# Load the final trained MA_01 (ResNet50-based) model and its training history
mod_file_pref = "MA_01" # Final ResNet50 model
custom_objects = [True, {"IoU": IoU}] # Custom IoU metric needed to deserialize the model
fm_path, hi_path, model = get_model(mod_file_pref, best_model = False, custom_objects = custom_objects,\
print_summary = False)
# Learning-curve plot spec:
# {plot_index: [{history_key: [line_style, legend_label], ...}, title, ylabel, xlabel]}
hist_plot_dict = {0: [{'loss': ['r-', 'Train_Set_Loss'],
'val_loss': ['b-', 'Val_Set_Loss']},
'Overall Loss vs. #Epoch', 'Overall Loss', '# Epochs'],
1: [{'class_op_loss': ['r-', 'Train_Set_Classfn_Loss'],
'val_class_op_loss': ['b-', 'Val_Set_Classfn_Loss']},
'Classfn Loss vs. #Epoch', 'Classfn Loss', '# Epochs'],
2: [{'class_op_categorical_accuracy': ['r-', 'Train_Set_Classfn_Accuracy'],
'val_class_op_categorical_accuracy': ['b-', 'Val_Set_Classfn_Accuracy']},
'Classfn Accuracy vs. #Epoch', 'Classfn Accuracy', '# Epochs'],
3: [{'reg_op_loss': ['r-', 'Train_Set_Regrn_Loss'],
'val_reg_op_loss': ['b-', 'Val_Set_Regrn_Loss']},
'Regression Loss vs. #Epoch', 'Regression Loss', '# Epochs'],
4: [{'reg_op_IoU': ['r-', 'Train_Set_BBOX_IoU'],
'val_reg_op_IoU': ['b-', 'Val_Set_BBOX_IoU']},
'BBOX IoU vs. #Epoch', 'Regression Accuracy', '# Epochs']
}
# Plot the learning curves in a 3-column grid
plot_lc(mod_file_pref, hi_path, hist_plot_dict, 3, 7, 8)
No augmentation will be done and shuffling will be turned off. 'preprocessing_function' will be set based on the model that's being loaded.
# Rebuild data generators for evaluation: no augmentation, no shuffling,
# so predictions line up with the dataframe row order.
bm_name = 'ResNet50' # Base model name
mod_inp_shape = (224, 224, 3) # Target image size for model input
mod_bat_size = 64 # Batch Size
seq_rescale = iaa.size.Resize(size = mod_inp_shape[0], interpolation = cv2.INTER_CUBIC)
train_generator = batch_generator_from_df(train_df, train_img_path, 'filename', 'class', True,\
mod_bat_size, mod_inp_shape[0:2], aug = False, shuffle = False,\
preprocessing_function = preprocess_data)
# NOTE(review): val generator reads from test_img_path - presumably the val
# split images are stored alongside the test images; confirm.
val_generator = batch_generator_from_df(val_df, test_img_path, 'filename', 'class', True,\
mod_bat_size, mod_inp_shape[0:2], aug = False, shuffle = False,\
preprocessing_function = preprocess_data)
test_generator = batch_generator_from_df(test_df, test_img_path, 'filename', 'class', True,\
mod_bat_size, mod_inp_shape[0:2], aug = False, shuffle = False,\
preprocessing_function = preprocess_data)
Found 8144 images belonging to 196 classes Found 1176 images belonging to 196 classes Found 6865 images belonging to 196 classes
# Evaluate the loaded model on the training, validation and test sets and
# print per-head losses and metrics for each. The train/val/test evaluation
# code was previously copy-pasted three times; factored into a local helper.
def _print_eval(set_name, gen):
    # Evaluate the module-level `model` on one generator and print its metrics
    res = model.evaluate(x = gen, steps = len(gen), verbose = 0, return_dict = True)
    print("%s: Overall loss is %0.4f" % (set_name, res['loss']))
    print("%s: Classification loss is %0.4f" % (set_name, res['class_op_loss']))
    print("%s: Regression loss is %0.4f" % (set_name, res['reg_op_loss']))
    print("%s: Classification accuracy is %0.2f %%"
          % (set_name, (100 * res['class_op_categorical_accuracy'])))
    print("%s: BBOX IoU is %0.2f" % (set_name, res['reg_op_IoU']))
    print()

_print_eval("Training set", train_generator)
_print_eval("Validation set", val_generator)
_print_eval("Test set", test_generator)
Training set: Overall loss is 0.7589 Training set: Classification loss is 0.0921 Training set: Regression loss is 0.0007 Training set: Classification accuracy is 97.09 % Training set: BBOX IoU is 0.89 Validation set: Overall loss is 1.6189 Validation set: Classification loss is 0.6896 Validation set: Regression loss is 0.0009 Validation set: Classification accuracy is 82.91 % Validation set: BBOX IoU is 0.88 Test set: Overall loss is 1.5215 Test set: Classification loss is 0.6570 Test set: Regression loss is 0.0009 Test set: Classification accuracy is 82.99 % Test set: BBOX IoU is 0.88
### Use this to plot random images from a particular set
# NOTE(review): no random seed is set, so the sampled images differ on re-run
samp_indices = np.random.randint(low = 0, high = train_df.shape[0], size = 8)
fig_title = 'Random Images from training set'
# Predict and plot (with ground-truth and predicted BBOXes overlaid)
predict_and_plot_df(train_df, 'filename', 'class', train_img_path, True, samp_indices,\
True, model, mod_inp_shape, ind_class_dict, 2, 9, 9, fig_title)
### Use this to plot random images from a particular set
samp_indices = np.random.randint(low = 0, high = val_df.shape[0], size = 8)
fig_title = 'Random Images from validation set'
# Predict and plot
predict_and_plot_df(val_df, 'filename', 'class', test_img_path, True, samp_indices,\
True, model, mod_inp_shape, ind_class_dict, 2, 9, 9, fig_title)
### Use this to plot random images from a particular set
samp_indices = np.random.randint(low = 0, high = test_df.shape[0], size = 8)
fig_title = 'Random Images from test set'
# Predict and plot
predict_and_plot_df(test_df, 'filename', 'class', test_img_path, True, samp_indices,\
True, model, mod_inp_shape, ind_class_dict, 2, 9, 9, fig_title)
Model Details:
Model input image shape set to (224, 224, 3)
# Load the final trained MB_01 model and its training history
mod_file_pref = "MB_01"
custom_objects = [True, {"IoU": IoU}] # Custom IoU metric needed to deserialize the model
fm_path, hi_path, model = get_model(mod_file_pref, best_model = False, custom_objects = custom_objects,\
print_summary = False)
# Learning-curve plot spec:
# {plot_index: [{history_key: [line_style, legend_label], ...}, title, ylabel, xlabel]}
hist_plot_dict = {0: [{'loss': ['r-', 'Train_Set_Loss'],
'val_loss': ['b-', 'Val_Set_Loss']},
'Overall Loss vs. #Epoch', 'Overall Loss', '# Epochs'],
1: [{'class_op_loss': ['r-', 'Train_Set_Classfn_Loss'],
'val_class_op_loss': ['b-', 'Val_Set_Classfn_Loss']},
'Classfn Loss vs. #Epoch', 'Classfn Loss', '# Epochs'],
2: [{'class_op_categorical_accuracy': ['r-', 'Train_Set_Classfn_Accuracy'],
'val_class_op_categorical_accuracy': ['b-', 'Val_Set_Classfn_Accuracy']},
'Classfn Accuracy vs. #Epoch', 'Classfn Accuracy', '# Epochs'],
3: [{'reg_op_loss': ['r-', 'Train_Set_Regrn_Loss'],
'val_reg_op_loss': ['b-', 'Val_Set_Regrn_Loss']},
'Regression Loss vs. #Epoch', 'Regression Loss', '# Epochs'],
4: [{'reg_op_IoU': ['r-', 'Train_Set_BBOX_IoU'],
'val_reg_op_IoU': ['b-', 'Val_Set_BBOX_IoU']},
'BBOX IoU vs. #Epoch', 'Regression Accuracy', '# Epochs']
}
# Plot the learning curves in a 3-column grid
plot_lc(mod_file_pref, hi_path, hist_plot_dict, 3, 7, 8)
No augmentation will be done and shuffling will be turned off. 'preprocessing_function' will be set based on the model that's being loaded.
# Rebuild data generators for evaluation: no augmentation, no shuffling,
# so predictions line up with the dataframe row order.
bm_name = 'DenseNet'
mod_inp_shape = (224, 224, 3) # Target image size for model input
mod_bat_size = 64 # Batch Size
seq_rescale = iaa.size.Resize(size = mod_inp_shape[0], interpolation = cv2.INTER_CUBIC)
train_generator = batch_generator_from_df(train_df, train_img_path, 'filename', 'class', True,\
mod_bat_size, mod_inp_shape[0:2], aug = False, shuffle = False,\
preprocessing_function = preprocess_data)
# NOTE(review): val generator reads from test_img_path - presumably the val
# split images are stored alongside the test images; confirm.
val_generator = batch_generator_from_df(val_df, test_img_path, 'filename', 'class', True,\
mod_bat_size, mod_inp_shape[0:2], aug = False, shuffle = False,\
preprocessing_function = preprocess_data)
test_generator = batch_generator_from_df(test_df, test_img_path, 'filename', 'class', True,\
mod_bat_size, mod_inp_shape[0:2], aug = False, shuffle = False,\
preprocessing_function = preprocess_data)
Found 8144 images belonging to 196 classes Found 1176 images belonging to 196 classes Found 6865 images belonging to 196 classes
# Evaluate the loaded model on the training, validation and test sets and
# print per-head losses and metrics for each. The train/val/test evaluation
# code was previously copy-pasted three times; factored into a local helper.
def _print_eval(set_name, gen):
    # Evaluate the module-level `model` on one generator and print its metrics
    res = model.evaluate(x = gen, steps = len(gen), verbose = 0, return_dict = True)
    print("%s: Overall loss is %0.4f" % (set_name, res['loss']))
    print("%s: Classification loss is %0.4f" % (set_name, res['class_op_loss']))
    print("%s: Regression loss is %0.4f" % (set_name, res['reg_op_loss']))
    print("%s: Classification accuracy is %0.2f %%"
          % (set_name, (100 * res['class_op_categorical_accuracy'])))
    print("%s: BBOX IoU is %0.2f" % (set_name, res['reg_op_IoU']))
    print()

_print_eval("Training set", train_generator)
_print_eval("Validation set", val_generator)
_print_eval("Test set", test_generator)
Training set: Overall loss is 0.7800 Training set: Classification loss is 0.1129 Training set: Regression loss is 0.0007 Training set: Classification accuracy is 96.64 % Training set: BBOX IoU is 0.89 Validation set: Overall loss is 1.7442 Validation set: Classification loss is 0.8764 Validation set: Regression loss is 0.0009 Validation set: Classification accuracy is 79.85 % Validation set: BBOX IoU is 0.88 Test set: Overall loss is 1.5497 Test set: Classification loss is 0.7042 Test set: Regression loss is 0.0008 Test set: Classification accuracy is 80.77 % Test set: BBOX IoU is 0.88
### Use this to plot random images from a particular set
# NOTE(review): no random seed is set, so the sampled images differ on re-run
samp_indices = np.random.randint(low = 0, high = train_df.shape[0], size = 8)
fig_title = 'Random Images from training set'
# Predict and plot (with ground-truth and predicted BBOXes overlaid)
predict_and_plot_df(train_df, 'filename', 'class', train_img_path, True, samp_indices,\
True, model, mod_inp_shape, ind_class_dict, 2, 9, 9, fig_title)
### Use this to plot random images from a particular set
samp_indices = np.random.randint(low = 0, high = test_df.shape[0], size = 8)
fig_title = 'Random Images from test set'
# Predict and plot
predict_and_plot_df(test_df, 'filename', 'class', test_img_path, True, samp_indices,\
True, model, mod_inp_shape, ind_class_dict, 2, 9, 9, fig_title)
Model Details:
Model input image shape set to (500, 500, 3)
# Load the final trained MC_01 model and its training history
mod_file_pref = "MC_01"
custom_objects = [True, {"IoU": IoU}] # Custom IoU metric needed to deserialize the model
fm_path, hi_path, model = get_model(mod_file_pref, best_model = False, custom_objects = custom_objects,\
print_summary = False)
# Learning-curve plot spec:
# {plot_index: [{history_key: [line_style, legend_label], ...}, title, ylabel, xlabel]}
hist_plot_dict = {0: [{'loss': ['r-', 'Train_Set_Loss'],
'val_loss': ['b-', 'Val_Set_Loss']},
'Overall Loss vs. #Epoch', 'Overall Loss', '# Epochs'],
1: [{'class_op_loss': ['r-', 'Train_Set_Classfn_Loss'],
'val_class_op_loss': ['b-', 'Val_Set_Classfn_Loss']},
'Classfn Loss vs. #Epoch', 'Classfn Loss', '# Epochs'],
2: [{'class_op_categorical_accuracy': ['r-', 'Train_Set_Classfn_Accuracy'],
'val_class_op_categorical_accuracy': ['b-', 'Val_Set_Classfn_Accuracy']},
'Classfn Accuracy vs. #Epoch', 'Classfn Accuracy', '# Epochs'],
3: [{'reg_op_loss': ['r-', 'Train_Set_Regrn_Loss'],
'val_reg_op_loss': ['b-', 'Val_Set_Regrn_Loss']},
'Regression Loss vs. #Epoch', 'Regression Loss', '# Epochs'],
4: [{'reg_op_IoU': ['r-', 'Train_Set_BBOX_IoU'],
'val_reg_op_IoU': ['b-', 'Val_Set_BBOX_IoU']},
'BBOX IoU vs. #Epoch', 'Regression Accuracy', '# Epochs']
}
# Plot the learning curves in a 3-column grid
plot_lc(mod_file_pref, hi_path, hist_plot_dict, 3, 7, 8)
No augmentation will be done and shuffling will be turned off. 'preprocessing_function' will be set based on the model that's being loaded.
# Rebuild data generators for evaluation: no augmentation, no shuffling,
# so predictions line up with the dataframe row order. Note the larger input
# size (500x500) and the smaller batch size to fit in memory.
bm_name = 'InceptionResNetV2' # Base Model Name
mod_inp_shape = (500, 500, 3) # Target image size for model input
mod_bat_size = 16 # Batch Size
seq_rescale = iaa.size.Resize(size = mod_inp_shape[0], interpolation = cv2.INTER_CUBIC)
train_generator = batch_generator_from_df(train_df, train_img_path, 'filename', 'class', True,\
mod_bat_size, mod_inp_shape[0:2], aug = False, shuffle = False,\
preprocessing_function = preprocess_data)
# NOTE(review): val generator reads from test_img_path - presumably the val
# split images are stored alongside the test images; confirm.
val_generator = batch_generator_from_df(val_df, test_img_path, 'filename', 'class', True,\
mod_bat_size, mod_inp_shape[0:2], aug = False, shuffle = False,\
preprocessing_function = preprocess_data)
test_generator = batch_generator_from_df(test_df, test_img_path, 'filename', 'class', True,\
mod_bat_size, mod_inp_shape[0:2], aug = False, shuffle = False,\
preprocessing_function = preprocess_data)
Found 8144 images belonging to 196 classes Found 1176 images belonging to 196 classes Found 6865 images belonging to 196 classes
# Evaluate the loaded model on the training, validation and test sets and
# print per-head losses and metrics for each. The train/val/test evaluation
# code was previously copy-pasted three times; factored into a local helper.
def _print_eval(set_name, gen):
    # Evaluate the module-level `model` on one generator and print its metrics
    res = model.evaluate(x = gen, steps = len(gen), verbose = 0, return_dict = True)
    print("%s: Overall loss is %0.4f" % (set_name, res['loss']))
    print("%s: Classification loss is %0.4f" % (set_name, res['class_op_loss']))
    print("%s: Regression loss is %0.4f" % (set_name, res['reg_op_loss']))
    print("%s: Classification accuracy is %0.2f %%"
          % (set_name, (100 * res['class_op_categorical_accuracy'])))
    print("%s: BBOX IoU is %0.2f" % (set_name, res['reg_op_IoU']))
    print()

_print_eval("Training set", train_generator)
_print_eval("Validation set", val_generator)
_print_eval("Test set", test_generator)
Training set: Overall loss is 0.5619 Training set: Classification loss is 0.0568 Training set: Regression loss is 0.0005 Training set: Classification accuracy is 98.26 % Training set: BBOX IoU is 0.90 Validation set: Overall loss is 1.6065 Validation set: Classification loss is 0.8598 Validation set: Regression loss is 0.0007 Validation set: Classification accuracy is 83.67 % Validation set: BBOX IoU is 0.89 Test set: Overall loss is 1.5152 Test set: Classification loss is 0.7791 Test set: Regression loss is 0.0007 Test set: Classification accuracy is 84.30 % Test set: BBOX IoU is 0.89
### Plot a few randomly chosen training-set images with model predictions
fig_title = 'Random Images from training set'
# Draw 8 random row indices from the training dataframe (no seed — sample
# changes on every run, matching the original behaviour).
samp_indices = np.random.randint(low = 0, high = train_df.shape[0], size = 8)
# Predict on the sampled images and render them in a grid.
predict_and_plot_df(train_df, 'filename', 'class', train_img_path, True, samp_indices,
                    True, model, mod_inp_shape, ind_class_dict, 2, 9, 9, fig_title)
### Plot a few randomly chosen test-set images with model predictions
fig_title = 'Random Images from test set'
# Draw 8 random row indices from the test dataframe.
samp_indices = np.random.randint(low = 0, high = test_df.shape[0], size = 8)
# Predict on the sampled images and render them in a grid.
predict_and_plot_df(test_df, 'filename', 'class', test_img_path, True, samp_indices,
                    True, model, mod_inp_shape, ind_class_dict, 2, 9, 9, fig_title)
Model Details:
Model input image shape set to (400, 400, 3)
# File prefix identifying which saved model/history pair to load.
mod_file_pref = "MD_01"
# Register the custom IoU metric so Keras can deserialize the saved model.
custom_objects = [True, {"IoU": IoU}]
fm_path, hi_path, model = get_model(
    mod_file_pref, best_model = False,
    custom_objects = custom_objects, print_summary = False)
# Learning-curve panel specs, one tuple per panel:
#   (curves-to-plot, panel title, y-axis label, x-axis label)
# where curves-to-plot maps a history key to [line style, legend label].
panel_specs = [
    ({'loss': ['r-', 'Train_Set_Loss'],
      'val_loss': ['b-', 'Val_Set_Loss']},
     'Overall Loss vs. #Epoch', 'Overall Loss', '# Epochs'),
    ({'class_op_loss': ['r-', 'Train_Set_Classfn_Loss'],
      'val_class_op_loss': ['b-', 'Val_Set_Classfn_Loss']},
     'Classfn Loss vs. #Epoch', 'Classfn Loss', '# Epochs'),
    ({'class_op_categorical_accuracy': ['r-', 'Train_Set_Classfn_Accuracy'],
      'val_class_op_categorical_accuracy': ['b-', 'Val_Set_Classfn_Accuracy']},
     'Classfn Accuracy vs. #Epoch', 'Classfn Accuracy', '# Epochs'),
    ({'reg_op_loss': ['r-', 'Train_Set_Regrn_Loss'],
      'val_reg_op_loss': ['b-', 'Val_Set_Regrn_Loss']},
     'Regression Loss vs. #Epoch', 'Regression Loss', '# Epochs'),
    ({'reg_op_IoU': ['r-', 'Train_Set_BBOX_IoU'],
      'val_reg_op_IoU': ['b-', 'Val_Set_BBOX_IoU']},
     'BBOX IoU vs. #Epoch', 'Regression Accuracy', '# Epochs'),
]
# plot_lc expects an int-keyed dict of [curves, title, ylabel, xlabel] lists.
hist_plot_dict = {i: list(spec) for i, spec in enumerate(panel_specs)}
plot_lc(mod_file_pref, hi_path, hist_plot_dict, 3, 7, 8)
No augmentation will be done and shuffling will be turned off. 'preprocessing_function' will be set based on the model that's being loaded.
bm_name = 'EfficientNet'        # Base model name
mod_inp_shape = (400, 400, 3)   # Target image size for model input
mod_bat_size = 16               # Batch size

# Bicubic resize step that scales every image to the model's input resolution.
seq_rescale = iaa.size.Resize(size = mod_inp_shape[0], interpolation = cv2.INTER_CUBIC)

# One generator per split, with augmentation and shuffling disabled so the
# generator output stays aligned row-for-row with the source dataframes.
# NOTE(review): validation images are read from the test image folder — this
# matches the original call; presumably the val split lives under that folder.
def make_eval_generator(split_df, img_path):
    # Thin wrapper to avoid repeating the identical argument list three times.
    return batch_generator_from_df(split_df, img_path, 'filename', 'class', True,
                                   mod_bat_size, mod_inp_shape[0:2],
                                   aug = False, shuffle = False,
                                   preprocessing_function = preprocess_data)

train_generator = make_eval_generator(train_df, train_img_path)
val_generator = make_eval_generator(val_df, test_img_path)
test_generator = make_eval_generator(test_df, test_img_path)
Found 8144 images belonging to 196 classes Found 1176 images belonging to 196 classes Found 6865 images belonging to 196 classes
def report_eval_metrics(set_name, generator):
    """Evaluate the (global) two-headed `model` on `generator` and print its
    overall loss, per-head losses, classification accuracy and bbox IoU.

    Parameters
    ----------
    set_name : str
        Prefix for every printed line (e.g. "Training set").
    generator : keras.utils.Sequence
        Batch generator for the split being evaluated.
    """
    res = model.evaluate(x = generator, steps = len(generator), verbose = 0,
                         return_dict = True)
    print("%s: Overall loss is %0.4f" % (set_name, res['loss']))
    print("%s: Classification loss is %0.4f" % (set_name, res['class_op_loss']))
    print("%s: Regression loss is %0.4f" % (set_name, res['reg_op_loss']))
    print("%s: Classification accuracy is %0.2f %%"
          % (set_name, 100 * res['class_op_categorical_accuracy']))
    print("%s: BBOX IoU is %0.2f" % (set_name, res['reg_op_IoU']))
    print()

# Evaluate the model on all three splits. Output is identical to the previous
# copy-pasted version, but the print boilerplate is no longer triplicated.
for set_name, generator in [("Training set", train_generator),
                            ("Validation set", val_generator),
                            ("Test set", test_generator)]:
    report_eval_metrics(set_name, generator)
Training set: Overall loss is 0.3263 Training set: Classification loss is 0.0052 Training set: Regression loss is 0.0003 Training set: Classification accuracy is 99.80 % Training set: BBOX IoU is 0.92 Validation set: Overall loss is 1.0458 Validation set: Classification loss is 0.4491 Validation set: Regression loss is 0.0006 Validation set: Classification accuracy is 91.58 % Validation set: BBOX IoU is 0.91 Test set: Overall loss is 0.9354 Test set: Classification loss is 0.4164 Test set: Regression loss is 0.0005 Test set: Classification accuracy is 92.19 % Test set: BBOX IoU is 0.91
### Plot a few randomly chosen training-set images with model predictions
fig_title = 'Random Images from training set'
# Draw 8 random row indices from the training dataframe.
samp_indices = np.random.randint(low = 0, high = train_df.shape[0], size = 8)
# Predict on the sampled images and render them in a grid.
predict_and_plot_df(train_df, 'filename', 'class', train_img_path, True, samp_indices,
                    True, model, mod_inp_shape, ind_class_dict, 2, 9, 9, fig_title)
### Plot a few randomly chosen test-set images with model predictions
fig_title = 'Random Images from test set'
# Draw 8 random row indices from the test dataframe.
samp_indices = np.random.randint(low = 0, high = test_df.shape[0], size = 8)
# Predict on the sampled images and render them in a grid.
predict_and_plot_df(test_df, 'filename', 'class', test_img_path, True, samp_indices,
                    True, model, mod_inp_shape, ind_class_dict, 2, 9, 9, fig_title)
In this section, an ensemble of the four models defined above is created, and the performance of the ensembled model is evaluated.
# Register the custom IoU metric so the saved models can be deserialized.
custom_objects = [True, {"IoU": IoU}]

# Ensemble members: [file prefix, base-model name, eval batch size, input shape]
mod_dict = {"1": ["MA_01", 'ResNet50', 64, (224, 224, 3)],
            "2": ["MB_01", 'DenseNet', 64, (224, 224, 3)],
            "3": ["MC_01", 'InceptionResNetV2', 16, (500, 500, 3)],
            "4": ["MD_01", 'EfficientNet', 16, (400, 400, 3)]
           }

y_pred_train_list = []  # Per-model training set predictions
y_pred_val_list = []    # Per-model validation set predictions
y_pred_test_list = []   # Per-model test set predictions

# Unpack each member directly (the original iterated .items() but never used
# the key); the unpacked names match those used by the earlier cells.
for mod_file_pref, bm_name, mod_bat_size, mod_inp_shape in mod_dict.values():
    print(f"Loading model {mod_file_pref}...")
    _, _, model = get_model(mod_file_pref, False, custom_objects, False)
    # Re-define the rescaling sequence for this model's input shape
    seq_rescale = iaa.size.Resize(size = mod_inp_shape[0], interpolation = cv2.INTER_CUBIC)
    # Train / val / test generators. aug/shuffle passed by keyword for
    # consistency with the earlier evaluation cells (same positions, same values).
    print("Loading train/test/val generators...")
    train_generator = batch_generator_from_df(train_df, train_img_path, 'filename', 'class', True,
                                              mod_bat_size, mod_inp_shape[0:2],
                                              aug = False, shuffle = False,
                                              preprocessing_function = preprocess_data)
    val_generator = batch_generator_from_df(val_df, test_img_path, 'filename', 'class', True,
                                            mod_bat_size, mod_inp_shape[0:2],
                                            aug = False, shuffle = False,
                                            preprocessing_function = preprocess_data)
    test_generator = batch_generator_from_df(test_df, test_img_path, 'filename', 'class', True,
                                             mod_bat_size, mod_inp_shape[0:2],
                                             aug = False, shuffle = False,
                                             preprocessing_function = preprocess_data)
    print()
    # Generate predictions on each split for this model. The model has two
    # output heads; downstream cells index [0] for the classification output.
    print("Generating predictions for Training Set...")
    y_pred_train = model.predict(x = train_generator, steps = len(train_generator), verbose = 0)
    print("Generating predictions for Val Set...")
    y_pred_val = model.predict(x = val_generator, steps = len(val_generator), verbose = 0)
    print("Generating predictions for Test Set...")
    y_pred_test = model.predict(x = test_generator, steps = len(test_generator), verbose = 0)
    # Accumulate this model's predictions for the ensembling step
    y_pred_train_list.append(y_pred_train)
    y_pred_val_list.append(y_pred_val)
    y_pred_test_list.append(y_pred_test)
    print()
def sum_class_preds(pred_list):
    """Element-wise sum of each model's classification-head output.

    Parameters
    ----------
    pred_list : list
        One multi-output prediction per model; index 0 is the classification
        head's (n_samples, n_classes) score array.

    Returns
    -------
    np.ndarray
        Summed class scores across all models.
    """
    total = np.zeros_like(pred_list[0][0])
    for preds in pred_list:  # plain iteration — the enumerate index was unused
        total += preds[0]
    return total

# Model ensembling - sum the class scores from all the models together
y_pred_train = sum_class_preds(y_pred_train_list)
y_pred_val = sum_class_preds(y_pred_val_list)
y_pred_test = sum_class_preds(y_pred_test_list)

# Predicted class index = argmax of the summed scores per sample
y_pred_train_class_ind = np.argmax(y_pred_train, axis = 1)
y_pred_val_class_ind = np.argmax(y_pred_val, axis = 1)
y_pred_test_class_ind = np.argmax(y_pred_test, axis = 1)

# Map class indices back to class labels
y_pred_train_class = np.array([ind_class_dict[ind] for ind in y_pred_train_class_ind])
y_pred_val_class = np.array([ind_class_dict[ind] for ind in y_pred_val_class_ind])
y_pred_test_class = np.array([ind_class_dict[ind] for ind in y_pred_test_class_ind])

# Ground-truth class labels
y_true_train_class = np.array(train_df['class'])
y_true_val_class = np.array(val_df['class'])
y_true_test_class = np.array(test_df['class'])

# Report ensemble accuracy on each split
print("Training set accuracy of ensembled model is %0.2f %%"
      % (100 * np.mean(y_pred_train_class == y_true_train_class)))
print("Validation set accuracy of ensembled model is %0.2f %%"
      % (100 * np.mean(y_pred_val_class == y_true_val_class)))
print("Test set accuracy of ensembled model is %0.2f %%"
      % (100 * np.mean(y_pred_test_class == y_true_test_class)))
Loading model MA_01... Loading train/test/val generators... Found 8144 images belonging to 196 classes Found 1176 images belonging to 196 classes Found 6865 images belonging to 196 classes Generating predictions for Training Set... Generating predictions for Val Set... Generating predictions for Test Set... Loading model MB_01... Loading train/test/val generators... Found 8144 images belonging to 196 classes Found 1176 images belonging to 196 classes Found 6865 images belonging to 196 classes Generating predictions for Training Set... Generating predictions for Val Set... Generating predictions for Test Set... Loading model MC_01... Loading train/test/val generators... Found 8144 images belonging to 196 classes Found 1176 images belonging to 196 classes Found 6865 images belonging to 196 classes Generating predictions for Training Set... Generating predictions for Val Set... Generating predictions for Test Set... Loading model MD_01... Loading train/test/val generators... Found 8144 images belonging to 196 classes Found 1176 images belonging to 196 classes Found 6865 images belonging to 196 classes Generating predictions for Training Set... Generating predictions for Val Set... Generating predictions for Test Set... Training set accuracy of ensembled model is 99.63 % Validation set accuracy of ensembled model is 91.33 % Test set accuracy of ensembled model is 92.29 %
### Use this to plot random images corresponding to a particular label
# label = 5
# full_indices = np.nonzero(y_train == label)[0]
# samp_indices = list(np.random.choice(full_indices, size = 10))
# fig_title = f'Images from training set with actual label = {label}'
### Use this to plot random images corresponding to incorrect predictions from a particular set
# y_train_pred = np.argmax(model.predict(x = train_gen_cm, steps = len(train_gen_cm), verbose = 0),\
# axis = 1)
# full_indices = np.nonzero(y_train != y_train_pred)[0]
# samp_indices = list(np.random.choice(full_indices, size = 10))
# fig_title = 'Images from training set corresponding to incorrect predictions'
### Use this to plot random images corresponding to incorrect predictions of a particular label
# y_train_pred = np.argmax(model.predict(x = train_gen_cm, steps = len(train_gen_cm), verbose = 0),\
# axis = 1)
# label = 8
# full_indices = np.nonzero(y_train == label)[0]
# full_indices = full_indices[np.nonzero(y_train[full_indices] != y_train_pred[full_indices])]
# samp_indices = list(np.random.choice(full_indices, size = 10))
# fig_title = f'Images from training set with incorrect predictions of label = {label}'